// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"

#ifdef CONFIG_SPAPR_TCE_IOMMU
#include <asm/kvm_ppc.h>
#endif

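/*
 * One entry on kvm_vfio::file_list for each VFIO file descriptor that
 * userspace has handed to this device with KVM_DEV_VFIO_FILE_ADD.
 */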
struct kvm_vfio_file {
	struct list_head node;
	struct file *file;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct iommu_group *iommu_group;
#endif
};

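/*
 * Device-private state: the attached VFIO files plus the last
 * noncoherent-DMA state reported to the arch code.
 */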
struct kvm_vfio {
	struct list_head file_list;
	struct mutex lock;
	bool noncoherent;
};

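/*
 * The vfio_file_*() symbols live in the VFIO core, which may be built
 * as a module. Resolving them via symbol_get()/symbol_put() avoids a
 * hard module dependency: if VFIO is absent, each wrapper below simply
 * degrades to a benign "not set" / "false" / NULL result.
 */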
static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
	void (*fn)(struct file *file, struct kvm *kvm);

	fn = symbol_get(vfio_file_set_kvm);
	if (!fn)
		return;

	fn(file, kvm);

	symbol_put(vfio_file_set_kvm);
}

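/* True if the IOMMU forces DMA for this file to be cache coherent. */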
static bool kvm_vfio_file_enforced_coherent(struct file *file)
{
	bool (*fn)(struct file *file);
	bool ret;

	fn = symbol_get(vfio_file_enforced_coherent);
	if (!fn)
		return false;

	ret = fn(file);

	symbol_put(vfio_file_enforced_coherent);

	return ret;
}

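/* True if @file is a VFIO file (group or device cdev). */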
static bool kvm_vfio_file_is_valid(struct file *file)
{
	bool (*fn)(struct file *file);
	bool ret;

	fn = symbol_get(vfio_file_is_valid);
	if (!fn)
		return false;

	ret = fn(file);

	symbol_put(vfio_file_is_valid);

	return ret;
}

#ifdef CONFIG_SPAPR_TCE_IOMMU
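/*
 * Resolve the iommu_group behind a VFIO group file. The reference it
 * returns is dropped via iommu_group_put() in the release path below.
 */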
static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
{
	struct iommu_group *(*fn)(struct file *file);
	struct iommu_group *ret;

	fn = symbol_get(vfio_file_iommu_group);
	if (!fn)
		return NULL;

	ret = fn(file);

	symbol_put(vfio_file_iommu_group);

	return ret;
}

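/*
 * Unlink the group's in-kernel TCE tables from the VM and drop the
 * iommu_group reference taken in kvm_vfio_file_set_spapr_tce().
 */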
static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
					     struct kvm_vfio_file *kvf)
{
	if (WARN_ON_ONCE(!kvf->iommu_group))
		return;

	kvm_spapr_tce_release_iommu_group(kvm, kvf->iommu_group);
	iommu_group_put(kvf->iommu_group);
	kvf->iommu_group = NULL;
}
#endif

/*
 * Groups/devices can use the same or different IOMMU domains. If they
 * share a domain, adding a new group/device may change the coherency of
 * groups/devices we've previously been told about. We don't want to care
 * about any of that, so we retest every group/device and bail as soon as
 * we find one that's noncoherent.  This means we only ever
 * [un]register_noncoherent_dma once for the whole device.
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	bool noncoherent = false;
	struct kvm_vfio_file *kvf;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvf, &kv->file_list, node) {
		if (!kvm_vfio_file_enforced_coherent(kvf->file)) {
			noncoherent = true;
			break;
		}
	}

	if (noncoherent != kv->noncoherent) {
		kv->noncoherent = noncoherent;

		if (kv->noncoherent)
			kvm_arch_register_noncoherent_dma(dev->kvm);
		else
			kvm_arch_unregister_noncoherent_dma(dev->kvm);
	}

	mutex_unlock(&kv->lock);
}

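/*
 * KVM_DEV_VFIO_FILE_ADD: validate the fd, take a long-term reference
 * on the VFIO file, reject duplicates, and tell both the arch code and
 * VFIO itself that this VM now has an assigned device.
 */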
static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_file *kvf;
	struct file *filp;
	int ret;

	filp = fget(fd);
	if (!filp)
		return -EBADF;

	/* Ensure the FD is a vfio FD. */
	if (!kvm_vfio_file_is_valid(filp)) {
		ret = -EINVAL;
		goto err_fput;
	}

	mutex_lock(&kv->lock);

	list_for_each_entry(kvf, &kv->file_list, node) {
		if (kvf->file == filp) {
			ret = -EEXIST;
			goto err_unlock;
		}
	}

	kvf = kzalloc(sizeof(*kvf), GFP_KERNEL_ACCOUNT);
	if (!kvf) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	kvf->file = filp;
	list_add_tail(&kvf->node, &kv->file_list);

	kvm_arch_start_assignment(dev->kvm);
	kvm_vfio_file_set_kvm(kvf->file, dev->kvm);

	mutex_unlock(&kv->lock);

	kvm_vfio_update_coherency(dev);

	return 0;
err_unlock:
	mutex_unlock(&kv->lock);
err_fput:
	fput(filp);
	return ret;
}

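/*
 * KVM_DEV_VFIO_FILE_DEL: the fdget() reference is only used to locate
 * the matching entry; the long-term reference taken at ADD time is the
 * one dropped here.
 */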
static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_file *kvf;
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -ENOENT;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvf, &kv->file_list, node) {
		if (kvf->file != f.file)
			continue;

		list_del(&kvf->node);
		kvm_arch_end_assignment(dev->kvm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
#endif
		kvm_vfio_file_set_kvm(kvf->file, NULL);
		fput(kvf->file);
		kfree(kvf);
		ret = 0;
		break;
	}

	mutex_unlock(&kv->lock);

	fdput(f);

	kvm_vfio_update_coherency(dev);

	return ret;
}

#ifdef CONFIG_SPAPR_TCE_IOMMU
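/*
 * KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: connect a previously added VFIO
 * group to an in-kernel TCE table so that TCE hypercalls can be
 * handled without exiting to userspace.
 */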
static int kvm_vfio_file_set_spapr_tce(struct kvm_device *dev,
				       void __user *arg)
{
	struct kvm_vfio_spapr_tce param;
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_file *kvf;
	struct fd f;
	int ret;

	if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
		return -EFAULT;

	f = fdget(param.groupfd);
	if (!f.file)
		return -EBADF;

	ret = -ENOENT;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvf, &kv->file_list, node) {
		if (kvf->file != f.file)
			continue;

		if (!kvf->iommu_group) {
			kvf->iommu_group = kvm_vfio_file_iommu_group(kvf->file);
			if (WARN_ON_ONCE(!kvf->iommu_group)) {
				ret = -EIO;
				goto err_fdput;
			}
		}

		ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
						       kvf->iommu_group);
		break;
	}

err_fdput:
	mutex_unlock(&kv->lock);
	fdput(f);
	return ret;
}
#endif

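/*
 * Dispatch the KVM_DEV_VFIO_FILE group attributes. A minimal userspace
 * sketch of how this device is driven (hypothetical fds, no error
 * handling):
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *
 *	int32_t vfio_fd = open("/dev/vfio/...", O_RDWR);
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_VFIO_FILE,
 *		.attr  = KVM_DEV_VFIO_FILE_ADD,
 *		.addr  = (uint64_t)&vfio_fd,
 *	};
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 */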
static int kvm_vfio_set_file(struct kvm_device *dev, long attr,
			     void __user *arg)
{
	int32_t __user *argp = arg;
	int32_t fd;

	switch (attr) {
	case KVM_DEV_VFIO_FILE_ADD:
		if (get_user(fd, argp))
			return -EFAULT;
		return kvm_vfio_file_add(dev, fd);

	case KVM_DEV_VFIO_FILE_DEL:
		if (get_user(fd, argp))
			return -EFAULT;
		return kvm_vfio_file_del(dev, fd);

#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
		return kvm_vfio_file_set_spapr_tce(dev, arg);
#endif
	}

	return -ENXIO;
}

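/*
 * Note: the legacy KVM_DEV_VFIO_GROUP_* constants are defined in the
 * uapi headers as aliases of their KVM_DEV_VFIO_FILE_* counterparts,
 * which is why only the FILE group needs handling here.
 */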
static int kvm_vfio_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_FILE:
		return kvm_vfio_set_file(dev, attr->attr,
					 u64_to_user_ptr(attr->addr));
	}

	return -ENXIO;
}

static int kvm_vfio_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_FILE:
		switch (attr->attr) {
		case KVM_DEV_VFIO_FILE_ADD:
		case KVM_DEV_VFIO_FILE_DEL:
#ifdef CONFIG_SPAPR_TCE_IOMMU
		case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
#endif
			return 0;
		}

		break;
	}

	return -ENXIO;
}

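/*
 * VM/device teardown: mirrors kvm_vfio_file_del() for every entry that
 * userspace never removed, then frees the device itself.
 */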
static void kvm_vfio_release(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_file *kvf, *tmp;

	list_for_each_entry_safe(kvf, tmp, &kv->file_list, node) {
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
#endif
		kvm_vfio_file_set_kvm(kvf->file, NULL);
		fput(kvf->file);
		list_del(&kvf->node);
		kfree(kvf);
		kvm_arch_end_assignment(dev->kvm);
	}

	kvm_vfio_update_coherency(dev);

	kfree(kv);
	kfree(dev); /* allocated by kvm_ioctl_create_device(), freed by .release */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type);

static struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.release = kvm_vfio_release,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};

static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
	struct kvm_device *tmp;
	struct kvm_vfio *kv;

	/* Only one VFIO "device" per VM */
	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
		if (tmp->ops == &kvm_vfio_ops)
			return -EBUSY;

	kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT);
	if (!kv)
		return -ENOMEM;

	INIT_LIST_HEAD(&kv->file_list);
	mutex_init(&kv->lock);

	dev->private = kv;

	return 0;
}

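/* Registered from kvm_init() and torn down again from kvm_exit(). */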
int kvm_vfio_ops_init(void)
{
	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}

void kvm_vfio_ops_exit(void)
{
	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}