// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"

#ifdef CONFIG_SPAPR_TCE_IOMMU
#include <asm/kvm_ppc.h>
#endif

struct kvm_vfio_group {
	struct list_head node;
	struct file *file;
};

struct kvm_vfio {
	struct list_head group_list;
	struct mutex lock;
	bool noncoherent;
};

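/*
 * Wrappers around the vfio_file_*() symbols: each is resolved with
 * symbol_get() at call time so that kvm does not carry a hard module
 * dependency on vfio.  When vfio is not loaded, symbol_get() returns
 * NULL and each wrapper falls back to a benign default (no-op, false,
 * or NULL).
 */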
static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
	void (*fn)(struct file *file, struct kvm *kvm);

	fn = symbol_get(vfio_file_set_kvm);
	if (!fn)
		return;

	fn(file, kvm);

	symbol_put(vfio_file_set_kvm);
}

static bool kvm_vfio_file_enforced_coherent(struct file *file)
{
	bool (*fn)(struct file *file);
	bool ret;

	fn = symbol_get(vfio_file_enforced_coherent);
	if (!fn)
		return false;

	ret = fn(file);

	symbol_put(vfio_file_enforced_coherent);

	return ret;
}

static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
{
	struct iommu_group *(*fn)(struct file *file);
	struct iommu_group *ret;

	fn = symbol_get(vfio_file_iommu_group);
	if (!fn)
		return NULL;

	ret = fn(file);

	symbol_put(vfio_file_iommu_group);

	return ret;
}

#ifdef CONFIG_SPAPR_TCE_IOMMU
static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
					     struct kvm_vfio_group *kvg)
{
	struct iommu_group *grp = kvm_vfio_file_iommu_group(kvg->file);

	if (WARN_ON_ONCE(!grp))
		return;

	kvm_spapr_tce_release_iommu_group(kvm, grp);
}
#endif

/*
 * Groups can use the same or different IOMMU domains.  If the same, then
 * adding a new group may change the coherency of groups we've previously
 * been told about.  We don't want to care about any of that so we retest
 * each group and bail as soon as we find one that's noncoherent.  This
 * means we only ever [un]register_noncoherent_dma once for the whole
 * device (on x86, for example, this determines whether guest wbinvd is
 * emulated or treated as a no-op).
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	bool noncoherent = false;
	struct kvm_vfio_group *kvg;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
			noncoherent = true;
			break;
		}
	}

	if (noncoherent != kv->noncoherent) {
		kv->noncoherent = noncoherent;

		if (kv->noncoherent)
			kvm_arch_register_noncoherent_dma(dev->kvm);
		else
			kvm_arch_unregister_noncoherent_dma(dev->kvm);
	}

	mutex_unlock(&kv->lock);
}

static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct file *filp;
	int ret;

	filp = fget(fd);
	if (!filp)
		return -EBADF;

	/* Ensure the FD is a vfio group FD. */
	if (!kvm_vfio_file_iommu_group(filp)) {
		ret = -EINVAL;
		goto err_fput;
	}

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (kvg->file == filp) {
			ret = -EEXIST;
			goto err_unlock;
		}
	}

	kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
	if (!kvg) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	kvg->file = filp;
	list_add_tail(&kvg->node, &kv->group_list);

	kvm_arch_start_assignment(dev->kvm);

	mutex_unlock(&kv->lock);

	kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
	kvm_vfio_update_coherency(dev);

	return 0;
err_unlock:
	mutex_unlock(&kv->lock);
err_fput:
	fput(filp);
	return ret;
}

static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -ENOENT;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (kvg->file != f.file)
			continue;

		list_del(&kvg->node);
		kvm_arch_end_assignment(dev->kvm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
#endif
		kvm_vfio_file_set_kvm(kvg->file, NULL);
		fput(kvg->file);
		kfree(kvg);
		ret = 0;
		break;
	}

	mutex_unlock(&kv->lock);

	fdput(f);

	kvm_vfio_update_coherency(dev);

	return ret;
}

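/*
 * On POWER, KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE wires a vfio group's IOMMU
 * group to an in-kernel TCE (DMA window) table identified by a table fd,
 * so that TCE hypercalls such as H_PUT_TCE can be handled without
 * exiting to userspace.
 */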
#ifdef CONFIG_SPAPR_TCE_IOMMU
static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
					void __user *arg)
{
	struct kvm_vfio_spapr_tce param;
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct fd f;
	int ret;

	if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
		return -EFAULT;

	f = fdget(param.groupfd);
	if (!f.file)
		return -EBADF;

	ret = -ENOENT;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		struct iommu_group *grp;

		if (kvg->file != f.file)
			continue;

		grp = kvm_vfio_file_iommu_group(kvg->file);
		if (WARN_ON_ONCE(!grp)) {
			ret = -EIO;
			goto err_fdput;
		}

		ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
						       grp);
		break;
	}

err_fdput:
	mutex_unlock(&kv->lock);
	fdput(f);
	return ret;
}
#endif

static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
			      void __user *arg)
{
	int32_t __user *argp = arg;
	int32_t fd;

	switch (attr) {
	case KVM_DEV_VFIO_GROUP_ADD:
		if (get_user(fd, argp))
			return -EFAULT;
		return kvm_vfio_group_add(dev, fd);

	case KVM_DEV_VFIO_GROUP_DEL:
		if (get_user(fd, argp))
			return -EFAULT;
		return kvm_vfio_group_del(dev, fd);

#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
		return kvm_vfio_group_set_spapr_tce(dev, arg);
#endif
	}

	return -ENXIO;
}

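/*
 * Userspace reaches kvm_vfio_set_group() via KVM_SET_DEVICE_ATTR on a
 * KVM_DEV_TYPE_VFIO device.  A minimal sketch (assumed fds vm_fd and
 * group_fd; error handling omitted):
 *
 *	int32_t group_fd = open("/dev/vfio/<group>", O_RDWR);
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_VFIO_GROUP,
 *		.attr = KVM_DEV_VFIO_GROUP_ADD,
 *		.addr = (__u64)(uintptr_t)&group_fd,
 *	};
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	(cd.fd is the device fd)
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 */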
static int kvm_vfio_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		return kvm_vfio_set_group(dev, attr->attr,
					  u64_to_user_ptr(attr->addr));
	}

	return -ENXIO;
}

static int kvm_vfio_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		switch (attr->attr) {
		case KVM_DEV_VFIO_GROUP_ADD:
		case KVM_DEV_VFIO_GROUP_DEL:
#ifdef CONFIG_SPAPR_TCE_IOMMU
		case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
#endif
			return 0;
		}

		break;
	}

	return -ENXIO;
}

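/*
 * Device teardown: releases every group just as kvm_vfio_group_del()
 * would, then frees the device itself.
 */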
static void kvm_vfio_destroy(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg, *tmp;

	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
#endif
		kvm_vfio_file_set_kvm(kvg->file, NULL);
		fput(kvg->file);
		list_del(&kvg->node);
		kfree(kvg);
		kvm_arch_end_assignment(dev->kvm);
	}

	kvm_vfio_update_coherency(dev);

	kfree(kv);
	kfree(dev); /* allocated by kvm_ioctl_create_device(), freed by .destroy */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type);

static struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.destroy = kvm_vfio_destroy,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};

static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
	struct kvm_device *tmp;
	struct kvm_vfio *kv;

	/* Only one VFIO "device" per VM */
	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
		if (tmp->ops == &kvm_vfio_ops)
			return -EBUSY;

	kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT);
	if (!kv)
		return -ENOMEM;

	INIT_LIST_HEAD(&kv->group_list);
	mutex_init(&kv->lock);

	dev->private = kv;

	return 0;
}

int kvm_vfio_ops_init(void)
{
	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}

void kvm_vfio_ops_exit(void)
{
	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}