// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"

enum {
	VHOST_VDPA_FEATURES =
		(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
		(1ULL << VIRTIO_F_ANY_LAYOUT) |
		(1ULL << VIRTIO_F_VERSION_1) |
		(1ULL << VIRTIO_F_IOMMU_PLATFORM) |
		(1ULL << VIRTIO_F_RING_PACKED) |
		(1ULL << VIRTIO_F_ORDER_PLATFORM) |
		(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
		(1ULL << VIRTIO_RING_F_EVENT_IDX),

	VHOST_VDPA_NET_FEATURES = VHOST_VDPA_FEATURES |
		(1ULL << VIRTIO_NET_F_CSUM) |
		(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
		(1ULL << VIRTIO_NET_F_MTU) |
		(1ULL << VIRTIO_NET_F_MAC) |
		(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
		(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
		(1ULL << VIRTIO_NET_F_GUEST_ECN) |
		(1ULL << VIRTIO_NET_F_GUEST_UFO) |
		(1ULL << VIRTIO_NET_F_HOST_TSO4) |
		(1ULL << VIRTIO_NET_F_HOST_TSO6) |
		(1ULL << VIRTIO_NET_F_HOST_ECN) |
		(1ULL << VIRTIO_NET_F_HOST_UFO) |
		(1ULL << VIRTIO_NET_F_MRG_RXBUF) |
		(1ULL << VIRTIO_NET_F_STATUS) |
		(1ULL << VIRTIO_NET_F_SPEED_DUPLEX),
};

/* Currently, only a network backend without multiqueue is supported. */
#define VHOST_VDPA_VQ_MAX	2

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

static const u64 vhost_vdpa_features[] = {
	[VIRTIO_ID_NET] = VHOST_VDPA_NET_FEATURES,
};

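/*
 * Kick path: runs in the vhost worker when the guest signals the
 * queue's kickfd, and simply forwards the notification to the parent
 * vDPA device through the kick_vq config op.
 */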
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

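/*
 * Call path: invoked by the parent vDPA driver (typically from its
 * interrupt handler) when the device has used buffers to report; the
 * guest is notified through the queue's callfd, if one is set.
 */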
static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	ops->set_status(vdpa, 0);
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	/*
	 * Userspace shouldn't clear status bits unless it is resetting
	 * the status to 0.
	 */
	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	/* Reject an offset past the end of the config space outright,
	 * so the length check below cannot be fooled by underflow.
	 */
	if (c->len == 0 || c->off > size)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ops->get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, c->buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);
	features &= vhost_vdpa_features[v->virtio_id];

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (features & ~vhost_vdpa_features[v->virtio_id])
		return -EINVAL;

	if (ops->set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

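/*
 * Per-virtqueue ioctls: the queue index is read first and sanitized
 * with array_index_nospec() before use.  VHOST_VDPA_SET_VRING_ENABLE
 * is handled entirely here; the remaining commands are delegated to
 * the generic vhost_vring_ioctl() and their results are then
 * propagated to the parent device through the vDPA config ops.
 */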
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	if (cmd == VHOST_VDPA_SET_VRING_ENABLE) {
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	}

	if (cmd == VHOST_GET_VRING_BASE)
		vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx);

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

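/*
 * Top-level ioctl dispatcher, serialized by the vhost device mutex.
 * The dirty-log ioctls are rejected with -ENOIOCTLCMD since this
 * driver does not implement dirty page tracking; anything else the
 * generic vhost layer does not recognize is retried as a vring ioctl.
 */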
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	long r;

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

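/*
 * Tear down all IOTLB mappings that intersect [start, last]: each
 * backing page is unpinned (and marked dirty if the device could have
 * written to it), the pinned-page accounting is rolled back, and the
 * entry is removed from the IOTLB.
 */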
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

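/*
 * Establish a DMA mapping for the device using whichever mechanism the
 * parent driver provides: an incremental dma_map op, a whole-table
 * set_map op, or, failing both, the platform IOMMU via the domain
 * allocated in vhost_vdpa_alloc_domain().
 */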
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map)
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	else if (ops->set_map)
		r = ops->set_map(vdpa, dev->iotlb);
	else
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map)
		ops->dma_unmap(vdpa, iova, size);
	else if (ops->set_map)
		ops->set_map(vdpa, dev->iotlb);
	else
		iommu_unmap(v->domain, iova, size);
}

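/*
 * Handle a VHOST_IOTLB_UPDATE message: pin the userspace pages backing
 * [uaddr, uaddr + size) with pin_user_pages(), charge them against
 * RLIMIT_MEMLOCK, and map each physically contiguous run of pages as a
 * single chunk at the requested IOVA.
 */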
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long locked, lock_limit, pinned, i;
	u64 iova = msg->iova;
	int ret = 0;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	/* Check this before allocating page_list so nothing can leak. */
	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages)
		return -EINVAL;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	down_read(&dev->mm->mmap_sem);

	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (locked > lock_limit) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;

	while (npages) {
		pinned = min_t(unsigned long, npages, list_size);
		ret = pin_user_pages(cur_base, pinned,
				     gup_flags, page_list, NULL);
		if (ret != pinned) {
			/* Don't return a positive partial-pin count as
			 * success.
			 */
			if (ret >= 0)
				ret = -ENOMEM;
			goto out;
		}

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < ret; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the contiguous chunk pinned so far */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				ret = vhost_vdpa_map(v, iova, csize,
						     map_pfn << PAGE_SHIFT,
						     msg->perm);
				if (ret)
					goto out;
				map_pfn = this_pfn;
				iova += csize;
			}

			last_pfn = this_pfn;
		}

		cur_base += ret << PAGE_SHIFT;
		npages -= ret;
	}

	/* Map the last contiguous chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		atomic64_sub(npages, &dev->mm->pinned_vm);
	}
	up_read(&dev->mm->mmap_sem);
	free_page((unsigned long)page_list);
	return ret;
}

static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	int r = 0;

	r = vhost_dev_check_owner(dev);
	if (r)
		return r;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

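/*
 * If the parent device does not handle DMA mappings by itself (no
 * set_map or dma_map op), allocate an IOMMU domain on the DMA device's
 * bus and attach to it so that vhost_vdpa_map() can program the
 * platform IOMMU directly.  Cache-coherent DMA is required.
 */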
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* The device wants to handle the DMA mappings by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	v->domain = NULL;
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

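/*
 * Only one open at a time is allowed: the atomic_cmpxchg() on
 * v->opened makes the device node exclusive.  Opening resets the
 * device, sets up the vhost device with its virtqueues, and allocates
 * the IOTLB and (if needed) the IOMMU domain.
 */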
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
	kfree(vqs);
err:
	atomic_dec(&v->opened);
	return r;
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

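/*
 * Minimal userspace usage sketch (illustrative only; it assumes udev
 * has created the "/dev/vhost-vdpa-0" node for minor 0, and it omits
 * vring and IOTLB setup).  It needs <fcntl.h>, <sys/ioctl.h> and
 * <linux/vhost.h>:
 *
 *	int fd = open("/dev/vhost-vdpa-0", O_RDWR);
 *	__u32 device_id;
 *	__u64 features;
 *
 *	ioctl(fd, VHOST_VDPA_GET_DEVICE_ID, &device_id);
 *	ioctl(fd, VHOST_GET_FEATURES, &features);
 */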
static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

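/*
 * Probe: accept the device, reserve a minor, and expose a per-device
 * character device named "vhost-vdpa-<minor>".  The struct vhost_vdpa
 * itself is freed from the embedded struct device's release callback
 * once the last reference is dropped.
 */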
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor, nvqs = VHOST_VDPA_VQ_MAX;
	int r;

	/* Currently, we only accept network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

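/*
 * Remove: delete the character device, then wait (via the completion
 * signalled in vhost_vdpa_release()) until any current user closes the
 * file before dropping the final device reference.
 */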
static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");