// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"

enum {
	VHOST_VDPA_FEATURES =
		(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
		(1ULL << VIRTIO_F_ANY_LAYOUT) |
		(1ULL << VIRTIO_F_VERSION_1) |
		(1ULL << VIRTIO_F_IOMMU_PLATFORM) |
		(1ULL << VIRTIO_F_RING_PACKED) |
		(1ULL << VIRTIO_F_ORDER_PLATFORM) |
		(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
		(1ULL << VIRTIO_RING_F_EVENT_IDX),

	VHOST_VDPA_NET_FEATURES = VHOST_VDPA_FEATURES |
		(1ULL << VIRTIO_NET_F_CSUM) |
		(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
		(1ULL << VIRTIO_NET_F_MTU) |
		(1ULL << VIRTIO_NET_F_MAC) |
		(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
		(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
		(1ULL << VIRTIO_NET_F_GUEST_ECN) |
		(1ULL << VIRTIO_NET_F_GUEST_UFO) |
		(1ULL << VIRTIO_NET_F_HOST_TSO4) |
		(1ULL << VIRTIO_NET_F_HOST_TSO6) |
		(1ULL << VIRTIO_NET_F_HOST_ECN) |
		(1ULL << VIRTIO_NET_F_HOST_UFO) |
		(1ULL << VIRTIO_NET_F_MRG_RXBUF) |
		(1ULL << VIRTIO_NET_F_STATUS) |
		(1ULL << VIRTIO_NET_F_SPEED_DUPLEX),
};

/* Currently, only a network backend without multiqueue is supported. */
#define VHOST_VDPA_VQ_MAX	2

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

static const u64 vhost_vdpa_features[] = {
	[VIRTIO_ID_NET] = VHOST_VDPA_NET_FEATURES,
};

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	ops->set_status(vdpa, 0);
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

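/*
 * Illustrative userspace usage (a sketch, not part of this driver):
 * querying the virtio device ID through the char device registered
 * below.  The path "/dev/vhost-vdpa-0" is an assumption for the first
 * probed device.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *
 *	int main(void)
 *	{
 *		__u32 device_id;
 *		int fd = open("/dev/vhost-vdpa-0", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (ioctl(fd, VHOST_VDPA_GET_DEVICE_ID, &device_id) == 0)
 *			printf("virtio device id: %u\n", device_id);
 *		return 0;
 *	}
 */
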
static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	/*
	 * Userspace shouldn't remove status bits unless resetting
	 * the status to 0.
	 */
	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	return 0;
}

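/*
 * Illustrative userspace usage (a sketch): driving the virtio status
 * handshake via VHOST_VDPA_SET_STATUS.  Per the check above, bits can
 * only be added, never removed, except by writing 0 to reset.  "fd" is
 * assumed to be an open vhost-vdpa char device.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *	#include <linux/virtio_config.h>
 *
 *	__u8 status = 0;
 *
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);	// reset
 *	status = VIRTIO_CONFIG_S_ACKNOWLEDGE;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);
 *	status |= VIRTIO_CONFIG_S_DRIVER;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);
 *	// ... negotiate features, then set FEATURES_OK and DRIVER_OK
 */
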
static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ops->get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

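/*
 * Illustrative userspace usage (a sketch): reading the MAC address out
 * of a net device's config space with VHOST_VDPA_GET_CONFIG.  "fd" is
 * assumed to be an open vhost-vdpa char device; the MAC sits at offset
 * 0 of struct virtio_net_config.
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *
 *	static int read_mac(int fd, __u8 mac[6])
 *	{
 *		struct vhost_vdpa_config *cfg;
 *		int r;
 *
 *		cfg = calloc(1, sizeof(*cfg) + 6);
 *		if (!cfg)
 *			return -1;
 *		cfg->off = 0;
 *		cfg->len = 6;
 *		r = ioctl(fd, VHOST_VDPA_GET_CONFIG, cfg);
 *		if (r == 0)
 *			memcpy(mac, cfg->buf, 6);
 *		free(cfg);
 *		return r;
 *	}
 */
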
static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, c->buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);
	features &= vhost_vdpa_features[v->virtio_id];

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (features & ~vhost_vdpa_features[v->virtio_id])
		return -EINVAL;

	if (ops->set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

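/*
 * Illustrative userspace usage (a sketch): feature negotiation.  The
 * set returned by VHOST_GET_FEATURES is already masked by
 * vhost_vdpa_features[], so the caller only clears what it does not
 * want before writing the result back.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *	#include <linux/virtio_net.h>
 *
 *	__u64 features;
 *
 *	if (ioctl(fd, VHOST_GET_FEATURES, &features))
 *		return -1;
 *	features &= ~(1ULL << VIRTIO_NET_F_MRG_RXBUF);	// example: drop one
 *	if (ioctl(fd, VHOST_SET_FEATURES, &features))
 *		return -1;
 */
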
static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx)
		eventfd_ctx_put(v->config_ctx);
}

static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v->vdpa;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	/* Don't leave an ERR_PTR in config_ctx, or the release path
	 * would pass it to eventfd_ctx_put().
	 */
	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

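/*
 * Illustrative userspace usage (a sketch): receiving config-change
 * interrupts through an eventfd registered with
 * VHOST_VDPA_SET_CONFIG_CALL.
 *
 *	#include <sys/eventfd.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/vhost.h>
 *
 *	__u64 cnt;
 *	int efd = eventfd(0, 0);
 *
 *	if (efd < 0 || ioctl(fd, VHOST_VDPA_SET_CONFIG_CALL, &efd))
 *		return -1;
 *	read(efd, &cnt, sizeof(cnt));	// blocks until the config changes
 */
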
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	if (cmd == VHOST_VDPA_SET_VRING_ENABLE) {
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	}

	if (cmd == VHOST_GET_VRING_BASE)
		vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx);

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

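/*
 * Illustrative userspace usage (a sketch): bringing up virtqueue 0.
 * "desc", "avail" and "used" are assumed to point at a vring laid out
 * in memory already exposed to the device through the IOTLB interface
 * below; "kickfd" and "callfd" are eventfds.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *
 *	struct vhost_vring_state s = { .index = 0, .num = 256 };
 *	struct vhost_vring_addr a = {
 *		.index = 0,
 *		.desc_user_addr = (__u64)(uintptr_t)desc,
 *		.avail_user_addr = (__u64)(uintptr_t)avail,
 *		.used_user_addr = (__u64)(uintptr_t)used,
 *	};
 *	struct vhost_vring_file kick = { .index = 0, .fd = kickfd };
 *	struct vhost_vring_file call = { .index = 0, .fd = callfd };
 *
 *	ioctl(fd, VHOST_SET_VRING_NUM, &s);
 *	ioctl(fd, VHOST_SET_VRING_ADDR, &a);
 *	ioctl(fd, VHOST_SET_VRING_KICK, &kick);
 *	ioctl(fd, VHOST_SET_VRING_CALL, &call);
 *	s.num = 1;
 *	ioctl(fd, VHOST_VDPA_SET_VRING_ENABLE, &s);
 */
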
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	long r;

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map)
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	else if (ops->set_map)
		r = ops->set_map(vdpa, dev->iotlb);
	else
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));

	/* Don't leave a stale IOTLB entry behind on failure */
	if (r)
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map)
		ops->dma_unmap(vdpa, iova, size);
	else if (ops->set_map)
		ops->set_map(vdpa, dev->iotlb);
	else
		iommu_unmap(v->domain, iova, size);
}

static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long locked, lock_limit, pinned, i;
	u64 iova = msg->iova;
	int ret = 0;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	/* Compute the page count up front so a zero-sized request can't
	 * leak the page_list allocation below.
	 */
	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages)
		return -EINVAL;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	mmap_read_lock(dev->mm);

	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (locked > lock_limit) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;

	while (npages) {
		pinned = min_t(unsigned long, npages, list_size);
		ret = pin_user_pages(cur_base, pinned,
				     gup_flags, page_list, NULL);
		if (ret != pinned)
			goto out;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < ret; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the contiguous chunk pinned so far */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				if (vhost_vdpa_map(v, iova, csize,
						   map_pfn << PAGE_SHIFT,
						   msg->perm))
					goto out;
				map_pfn = this_pfn;
				iova += csize;
			}

			last_pfn = this_pfn;
		}

		cur_base += ret << PAGE_SHIFT;
		npages -= ret;
	}

	/* Map the remaining chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		atomic64_sub(npages, &dev->mm->pinned_vm);
	}
	mmap_read_unlock(dev->mm);
	free_page((unsigned long)page_list);
	return ret;
}

static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	int r = 0;

	r = vhost_dev_check_owner(dev);
	if (r)
		return r;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

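/*
 * Illustrative userspace usage (a sketch): installing a DMA mapping by
 * writing a struct vhost_msg_v2 to the char device (after
 * VHOST_SET_OWNER).  "buf" is assumed to be a page-aligned buffer of
 * "size" bytes to be exposed to the device at "iova".
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <linux/vhost.h>
 *
 *	struct vhost_msg_v2 msg = {
 *		.type = VHOST_IOTLB_MSG_V2,
 *		.iotlb = {
 *			.iova	= iova,
 *			.size	= size,
 *			.uaddr	= (__u64)(uintptr_t)buf,
 *			.perm	= VHOST_ACCESS_RW,
 *			.type	= VHOST_IOTLB_UPDATE,
 *		},
 *	};
 *
 *	if (write(fd, &msg, sizeof(msg)) != sizeof(msg))
 *		return -1;
 */
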
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
err:
	atomic_dec(&v->opened);
	return r;
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	int index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only
	 * support a doorbell that sits on a page boundary and
	 * does not share the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

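/*
 * Illustrative userspace usage (a sketch): mapping the doorbell page of
 * virtqueue 0 and kicking the device with a plain store instead of the
 * VHOST_SET_VRING_KICK eventfd.  This only works when the parent driver
 * implements get_vq_notification() and the area is page-aligned; note
 * the mapping must be write-only per the checks above.
 *
 *	#include <stdint.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	long psz = sysconf(_SC_PAGESIZE);
 *	uint16_t *db = mmap(NULL, psz, PROT_WRITE, MAP_SHARED,
 *			    fd, 0 * psz);	// vm_pgoff selects the vq
 *
 *	if (db != MAP_FAILED)
 *		*db = 0;	// typically the vq index; device-specific
 */
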
static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor, nvqs = VHOST_VDPA_VQ_MAX;
	int r;

	/* Currently, we only accept network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");