1 /*
2  * KVMGT - the implementation of Intel mediated pass-through framework for KVM
3  *
4  * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23  * SOFTWARE.
24  *
25  * Authors:
26  *    Kevin Tian <kevin.tian@intel.com>
27  *    Jike Song <jike.song@intel.com>
28  *    Xiaoguang Chen <xiaoguang.chen@intel.com>
29  */
30 
31 #include <linux/init.h>
32 #include <linux/device.h>
33 #include <linux/mm.h>
34 #include <linux/mmu_context.h>
35 #include <linux/types.h>
36 #include <linux/list.h>
37 #include <linux/rbtree.h>
38 #include <linux/spinlock.h>
39 #include <linux/eventfd.h>
40 #include <linux/uuid.h>
41 #include <linux/kvm_host.h>
42 #include <linux/vfio.h>
43 #include <linux/mdev.h>
44 
45 #include "i915_drv.h"
46 #include "gvt.h"
47 
48 static const struct intel_gvt_ops *intel_gvt_ops;
49 
50 /* helper macros copied from vfio-pci */
51 #define VFIO_PCI_OFFSET_SHIFT   40
52 #define VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> VFIO_PCI_OFFSET_SHIFT)
53 #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
54 #define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
55 
56 struct vfio_region {
57 	u32				type;
58 	u32				subtype;
59 	size_t				size;
60 	u32				flags;
61 };
62 
63 struct kvmgt_pgfn {
64 	gfn_t gfn;
65 	struct hlist_node hnode;
66 };
67 
68 struct kvmgt_guest_info {
69 	struct kvm *kvm;
70 	struct intel_vgpu *vgpu;
71 	struct kvm_page_track_notifier_node track_node;
72 #define NR_BKT (1 << 18)
73 	struct hlist_head ptable[NR_BKT];
74 #undef NR_BKT
75 };
76 
77 struct gvt_dma {
78 	struct rb_node node;
79 	gfn_t gfn;
80 	unsigned long iova;
81 };
82 
83 static inline bool handle_valid(unsigned long handle)
84 {
85 	return !!(handle & ~0xff);
86 }
87 
88 static int kvmgt_guest_init(struct mdev_device *mdev);
89 static void intel_vgpu_release_work(struct work_struct *work);
90 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
91 
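/*
 * DMA-map a single guest page (already pinned through VFIO) via the host
 * IOMMU and return the bus address as a page-frame-sized "iova" cookie.
 * gvt_dma_unmap_iova() below releases the mapping again.
 */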
92 static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
93 		unsigned long *iova)
94 {
95 	struct page *page;
96 	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
97 	dma_addr_t daddr;
98 
99 	if (unlikely(!pfn_valid(pfn)))
100 		return -EFAULT;
101 
102 	page = pfn_to_page(pfn);
103 	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
104 			PCI_DMA_BIDIRECTIONAL);
105 	if (dma_mapping_error(dev, daddr))
106 		return -ENOMEM;
107 
108 	*iova = (unsigned long)(daddr >> PAGE_SHIFT);
109 	return 0;
110 }
111 
112 static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
113 {
114 	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
115 	dma_addr_t daddr;
116 
117 	daddr = (dma_addr_t)(iova << PAGE_SHIFT);
118 	dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
119 }
120 
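/*
 * The gfn->iova cache is a per-vGPU rbtree keyed by guest frame number.
 * __gvt_cache_find() does the raw tree walk and must be called with
 * vdev.cache_lock held; gvt_cache_find() is the locked wrapper that
 * returns the cached iova or INTEL_GVT_INVALID_ADDR on a miss.
 */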
121 static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
122 {
123 	struct rb_node *node = vgpu->vdev.cache.rb_node;
124 	struct gvt_dma *ret = NULL;
125 
126 	while (node) {
127 		struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);
128 
129 		if (gfn < itr->gfn)
130 			node = node->rb_left;
131 		else if (gfn > itr->gfn)
132 			node = node->rb_right;
133 		else {
134 			ret = itr;
135 			goto out;
136 		}
137 	}
138 
139 out:
140 	return ret;
141 }
142 
143 static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
144 {
145 	struct gvt_dma *entry;
146 	unsigned long iova;
147 
148 	mutex_lock(&vgpu->vdev.cache_lock);
149 
150 	entry = __gvt_cache_find(vgpu, gfn);
151 	iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;
152 
153 	mutex_unlock(&vgpu->vdev.cache_lock);
154 	return iova;
155 }
156 
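/*
 * Insert a gfn->iova translation into the cache.  If the gfn is already
 * present the new node is simply freed; an allocation failure is ignored,
 * so a later lookup miss just re-pins and re-maps the page.
 */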
157 static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
158 		unsigned long iova)
159 {
160 	struct gvt_dma *new, *itr;
161 	struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
162 
163 	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
164 	if (!new)
165 		return;
166 
167 	new->gfn = gfn;
168 	new->iova = iova;
169 
170 	mutex_lock(&vgpu->vdev.cache_lock);
171 	while (*link) {
172 		parent = *link;
173 		itr = rb_entry(parent, struct gvt_dma, node);
174 
175 		if (gfn == itr->gfn)
176 			goto out;
177 		else if (gfn < itr->gfn)
178 			link = &parent->rb_left;
179 		else
180 			link = &parent->rb_right;
181 	}
182 
183 	rb_link_node(&new->node, parent, link);
184 	rb_insert_color(&new->node, &vgpu->vdev.cache);
185 	mutex_unlock(&vgpu->vdev.cache_lock);
186 	return;
187 
188 out:
189 	mutex_unlock(&vgpu->vdev.cache_lock);
190 	kfree(new);
191 }
192 
193 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
194 				struct gvt_dma *entry)
195 {
196 	rb_erase(&entry->node, &vgpu->vdev.cache);
197 	kfree(entry);
198 }
199 
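/*
 * Drop a single cached translation: unmap the DMA address, unpin the
 * guest page through VFIO and free the rbtree node.
 */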
200 static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
201 {
202 	struct device *dev = mdev_dev(vgpu->vdev.mdev);
203 	struct gvt_dma *this;
204 	unsigned long g1;
205 	int rc;
206 
207 	mutex_lock(&vgpu->vdev.cache_lock);
208 	this  = __gvt_cache_find(vgpu, gfn);
209 	if (!this) {
210 		mutex_unlock(&vgpu->vdev.cache_lock);
211 		return;
212 	}
213 
214 	g1 = gfn;
215 	gvt_dma_unmap_iova(vgpu, this->iova);
216 	rc = vfio_unpin_pages(dev, &g1, 1);
217 	WARN_ON(rc != 1);
218 	__gvt_cache_remove_entry(vgpu, this);
219 	mutex_unlock(&vgpu->vdev.cache_lock);
220 }
221 
222 static void gvt_cache_init(struct intel_vgpu *vgpu)
223 {
224 	vgpu->vdev.cache = RB_ROOT;
225 	mutex_init(&vgpu->vdev.cache_lock);
226 }
227 
228 static void gvt_cache_destroy(struct intel_vgpu *vgpu)
229 {
230 	struct gvt_dma *dma;
231 	struct rb_node *node = NULL;
232 	struct device *dev = mdev_dev(vgpu->vdev.mdev);
233 	unsigned long gfn;
234 
235 	for (;;) {
236 		mutex_lock(&vgpu->vdev.cache_lock);
237 		node = rb_first(&vgpu->vdev.cache);
238 		if (!node) {
239 			mutex_unlock(&vgpu->vdev.cache_lock);
240 			break;
241 		}
242 		dma = rb_entry(node, struct gvt_dma, node);
243 		gvt_dma_unmap_iova(vgpu, dma->iova);
244 		gfn = dma->gfn;
245 		__gvt_cache_remove_entry(vgpu, dma);
246 		mutex_unlock(&vgpu->vdev.cache_lock);
247 		vfio_unpin_pages(dev, &gfn, 1);
248 	}
249 }
250 
251 static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
252 		const char *name)
253 {
254 	int i;
255 	struct intel_vgpu_type *t;
256 	const char *driver_name = dev_driver_string(
257 			&gvt->dev_priv->drm.pdev->dev);
258 
259 	for (i = 0; i < gvt->num_types; i++) {
260 		t = &gvt->types[i];
261 		if (!strncmp(t->name, name + strlen(driver_name) + 1,
262 			sizeof(t->name)))
263 			return t;
264 	}
265 
266 	return NULL;
267 }
268 
269 static ssize_t available_instances_show(struct kobject *kobj,
270 					struct device *dev, char *buf)
271 {
272 	struct intel_vgpu_type *type;
273 	unsigned int num = 0;
274 	void *gvt = kdev_to_i915(dev)->gvt;
275 
276 	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
277 	if (!type)
278 		num = 0;
279 	else
280 		num = type->avail_instance;
281 
282 	return sprintf(buf, "%u\n", num);
283 }
284 
285 static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
286 		char *buf)
287 {
288 	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
289 }
290 
291 static ssize_t description_show(struct kobject *kobj, struct device *dev,
292 		char *buf)
293 {
294 	struct intel_vgpu_type *type;
295 	void *gvt = kdev_to_i915(dev)->gvt;
296 
297 	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
298 	if (!type)
299 		return 0;
300 
301 	return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
302 		       "fence: %d\nresolution: %s\n"
303 		       "weight: %d\n",
304 		       BYTES_TO_MB(type->low_gm_size),
305 		       BYTES_TO_MB(type->high_gm_size),
306 		       type->fence, vgpu_edid_str(type->resolution),
307 		       type->weight);
308 }
309 
310 static MDEV_TYPE_ATTR_RO(available_instances);
311 static MDEV_TYPE_ATTR_RO(device_api);
312 static MDEV_TYPE_ATTR_RO(description);
313 
314 static struct attribute *type_attrs[] = {
315 	&mdev_type_attr_available_instances.attr,
316 	&mdev_type_attr_device_api.attr,
317 	&mdev_type_attr_description.attr,
318 	NULL,
319 };
320 
321 static struct attribute_group *intel_vgpu_type_groups[] = {
322 	[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
323 };
324 
325 static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
326 {
327 	int i, j;
328 	struct intel_vgpu_type *type;
329 	struct attribute_group *group;
330 
331 	for (i = 0; i < gvt->num_types; i++) {
332 		type = &gvt->types[i];
333 
334 		group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
335 		if (WARN_ON(!group))
336 			goto unwind;
337 
338 		group->name = type->name;
339 		group->attrs = type_attrs;
340 		intel_vgpu_type_groups[i] = group;
341 	}
342 
343 	return true;
344 
345 unwind:
346 	for (j = 0; j < i; j++) {
347 		group = intel_vgpu_type_groups[j];
348 		kfree(group);
349 	}
350 
351 	return false;
352 }
353 
354 static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
355 {
356 	int i;
357 	struct attribute_group *group;
358 
359 	for (i = 0; i < gvt->num_types; i++) {
360 		group = intel_vgpu_type_groups[i];
361 		kfree(group);
362 	}
363 }
364 
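/*
 * The protect table tracks which guest frames are currently write-protected
 * for shadow page table maintenance.  It is a simple hash table keyed by
 * gfn; entries are added and removed as pages gain or lose write protection.
 */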
365 static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
366 {
367 	hash_init(info->ptable);
368 }
369 
370 static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
371 {
372 	struct kvmgt_pgfn *p;
373 	struct hlist_node *tmp;
374 	int i;
375 
376 	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
377 		hash_del(&p->hnode);
378 		kfree(p);
379 	}
380 }
381 
382 static struct kvmgt_pgfn *
383 __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
384 {
385 	struct kvmgt_pgfn *p, *res = NULL;
386 
387 	hash_for_each_possible(info->ptable, p, hnode, gfn) {
388 		if (gfn == p->gfn) {
389 			res = p;
390 			break;
391 		}
392 	}
393 
394 	return res;
395 }
396 
397 static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
398 				gfn_t gfn)
399 {
400 	struct kvmgt_pgfn *p;
401 
402 	p = __kvmgt_protect_table_find(info, gfn);
403 	return !!p;
404 }
405 
406 static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
407 {
408 	struct kvmgt_pgfn *p;
409 
410 	if (kvmgt_gfn_is_write_protected(info, gfn))
411 		return;
412 
413 	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
414 	if (WARN(!p, "gfn: 0x%llx\n", gfn))
415 		return;
416 
417 	p->gfn = gfn;
418 	hash_add(info->ptable, &p->hnode, gfn);
419 }
420 
421 static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
422 				gfn_t gfn)
423 {
424 	struct kvmgt_pgfn *p;
425 
426 	p = __kvmgt_protect_table_find(info, gfn);
427 	if (p) {
428 		hash_del(&p->hnode);
429 		kfree(p);
430 	}
431 }
432 
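/*
 * mdev "create" callback: look up the requested vGPU type from the mdev
 * type kobject name, ask the GVT core to instantiate a vGPU, then tie it
 * to the mdev device via drvdata.
 */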
433 static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
434 {
435 	struct intel_vgpu *vgpu = NULL;
436 	struct intel_vgpu_type *type;
437 	struct device *pdev;
438 	void *gvt;
439 	int ret;
440 
441 	pdev = mdev_parent_dev(mdev);
442 	gvt = kdev_to_i915(pdev)->gvt;
443 
444 	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
445 	if (!type) {
446 		gvt_vgpu_err("failed to find type %s to create\n",
447 						kobject_name(kobj));
448 		ret = -EINVAL;
449 		goto out;
450 	}
451 
452 	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
453 	if (IS_ERR_OR_NULL(vgpu)) {
454 		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
455 		gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
456 		goto out;
457 	}
458 
459 	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
460 
461 	vgpu->vdev.mdev = mdev;
462 	mdev_set_drvdata(mdev, vgpu);
463 
464 	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
465 		     dev_name(mdev_dev(mdev)));
466 	ret = 0;
467 
468 out:
469 	return ret;
470 }
471 
472 static int intel_vgpu_remove(struct mdev_device *mdev)
473 {
474 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
475 
476 	if (handle_valid(vgpu->handle))
477 		return -EBUSY;
478 
479 	intel_gvt_ops->vgpu_destroy(vgpu);
480 	return 0;
481 }
482 
483 static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
484 				     unsigned long action, void *data)
485 {
486 	struct intel_vgpu *vgpu = container_of(nb,
487 					struct intel_vgpu,
488 					vdev.iommu_notifier);
489 
490 	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
491 		struct vfio_iommu_type1_dma_unmap *unmap = data;
492 		unsigned long gfn, end_gfn;
493 
494 		gfn = unmap->iova >> PAGE_SHIFT;
495 		end_gfn = gfn + unmap->size / PAGE_SIZE;
496 
497 		while (gfn < end_gfn)
498 			gvt_cache_remove(vgpu, gfn++);
499 	}
500 
501 	return NOTIFY_OK;
502 }
503 
504 static int intel_vgpu_group_notifier(struct notifier_block *nb,
505 				     unsigned long action, void *data)
506 {
507 	struct intel_vgpu *vgpu = container_of(nb,
508 					struct intel_vgpu,
509 					vdev.group_notifier);
510 
511 	/* the only action we care about */
512 	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
513 		vgpu->vdev.kvm = data;
514 
515 		if (!data)
516 			schedule_work(&vgpu->vdev.release_work);
517 	}
518 
519 	return NOTIFY_OK;
520 }
521 
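/*
 * mdev "open" callback: register the VFIO IOMMU (DMA unmap) and group
 * (KVM association) notifiers, initialize the KVM-side guest state and
 * activate the vGPU.  Failures unwind the notifiers in reverse order.
 */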
522 static int intel_vgpu_open(struct mdev_device *mdev)
523 {
524 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
525 	unsigned long events;
526 	int ret;
527 
528 	vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
529 	vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
530 
531 	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
532 	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
533 				&vgpu->vdev.iommu_notifier);
534 	if (ret != 0) {
535 		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
536 			ret);
537 		goto out;
538 	}
539 
540 	events = VFIO_GROUP_NOTIFY_SET_KVM;
541 	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
542 				&vgpu->vdev.group_notifier);
543 	if (ret != 0) {
544 		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
545 			ret);
546 		goto undo_iommu;
547 	}
548 
549 	ret = kvmgt_guest_init(mdev);
550 	if (ret)
551 		goto undo_group;
552 
553 	intel_gvt_ops->vgpu_activate(vgpu);
554 
555 	atomic_set(&vgpu->vdev.released, 0);
556 	return ret;
557 
558 undo_group:
559 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
560 					&vgpu->vdev.group_notifier);
561 
562 undo_iommu:
563 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
564 					&vgpu->vdev.iommu_notifier);
565 out:
566 	return ret;
567 }
568 
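/*
 * Common release path, called from both the mdev "release" callback and
 * the deferred work scheduled when the VFIO group drops its KVM
 * reference.  The atomic "released" flag ensures it runs only once.
 */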
569 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
570 {
571 	struct kvmgt_guest_info *info;
572 	int ret;
573 
574 	if (!handle_valid(vgpu->handle))
575 		return;
576 
577 	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
578 		return;
579 
580 	intel_gvt_ops->vgpu_deactivate(vgpu);
581 
582 	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
583 					&vgpu->vdev.iommu_notifier);
584 	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
585 
586 	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
587 					&vgpu->vdev.group_notifier);
588 	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
589 
590 	info = (struct kvmgt_guest_info *)vgpu->handle;
591 	kvmgt_guest_exit(info);
592 
593 	vgpu->vdev.kvm = NULL;
594 	vgpu->handle = 0;
595 }
596 
597 static void intel_vgpu_release(struct mdev_device *mdev)
598 {
599 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
600 
601 	__intel_vgpu_release(vgpu);
602 }
603 
604 static void intel_vgpu_release_work(struct work_struct *work)
605 {
606 	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
607 					vdev.release_work);
608 
609 	__intel_vgpu_release(vgpu);
610 }
611 
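/*
 * Read the guest-programmed BAR0 base from the virtual config space,
 * handling both 32-bit and 64-bit memory BAR encodings.
 */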
612 static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
613 {
614 	u32 start_lo, start_hi;
615 	u32 mem_type;
616 	int pos = PCI_BASE_ADDRESS_0;
617 
618 	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
619 			PCI_BASE_ADDRESS_MEM_MASK;
620 	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
621 			PCI_BASE_ADDRESS_MEM_TYPE_MASK;
622 
623 	switch (mem_type) {
624 	case PCI_BASE_ADDRESS_MEM_TYPE_64:
625 		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
626 						+ pos + 4));
627 		break;
628 	case PCI_BASE_ADDRESS_MEM_TYPE_32:
629 	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
630 		/* 1M mem BAR treated as 32-bit BAR */
631 	default:
632 		/* unknown mem type treated as 32-bit BAR */
633 		start_hi = 0;
634 		break;
635 	}
636 
637 	return ((u64)start_hi << 32) | start_lo;
638 }
639 
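/*
 * Core read/write dispatcher for the mdev device node.  The VFIO region
 * index encoded in the file offset selects between emulated config space
 * and BAR0/BAR1 MMIO emulation; other regions are not supported.
 */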
640 static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
641 			size_t count, loff_t *ppos, bool is_write)
642 {
643 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
644 	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
645 	uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
646 	int ret = -EINVAL;
647 
648 
649 	if (index >= VFIO_PCI_NUM_REGIONS) {
650 		gvt_vgpu_err("invalid index: %u\n", index);
651 		return -EINVAL;
652 	}
653 
654 	switch (index) {
655 	case VFIO_PCI_CONFIG_REGION_INDEX:
656 		if (is_write)
657 			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
658 						buf, count);
659 		else
660 			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
661 						buf, count);
662 		break;
663 	case VFIO_PCI_BAR0_REGION_INDEX:
664 	case VFIO_PCI_BAR1_REGION_INDEX:
665 		if (is_write) {
666 			uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
667 
668 			ret = intel_gvt_ops->emulate_mmio_write(vgpu,
669 						bar0_start + pos, buf, count);
670 		} else {
671 			uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
672 
673 			ret = intel_gvt_ops->emulate_mmio_read(vgpu,
674 						bar0_start + pos, buf, count);
675 		}
676 		break;
677 	case VFIO_PCI_BAR2_REGION_INDEX:
678 	case VFIO_PCI_BAR3_REGION_INDEX:
679 	case VFIO_PCI_BAR4_REGION_INDEX:
680 	case VFIO_PCI_BAR5_REGION_INDEX:
681 	case VFIO_PCI_VGA_REGION_INDEX:
682 	case VFIO_PCI_ROM_REGION_INDEX:
683 	default:
684 		gvt_vgpu_err("unsupported region: %u\n", index);
685 	}
686 
687 	return ret == 0 ? count : ret;
688 }
689 
690 static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
691 			size_t count, loff_t *ppos)
692 {
693 	unsigned int done = 0;
694 	int ret;
695 
696 	while (count) {
697 		size_t filled;
698 
699 		if (count >= 4 && !(*ppos % 4)) {
700 			u32 val;
701 
702 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
703 					ppos, false);
704 			if (ret <= 0)
705 				goto read_err;
706 
707 			if (copy_to_user(buf, &val, sizeof(val)))
708 				goto read_err;
709 
710 			filled = 4;
711 		} else if (count >= 2 && !(*ppos % 2)) {
712 			u16 val;
713 
714 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
715 					ppos, false);
716 			if (ret <= 0)
717 				goto read_err;
718 
719 			if (copy_to_user(buf, &val, sizeof(val)))
720 				goto read_err;
721 
722 			filled = 2;
723 		} else {
724 			u8 val;
725 
726 			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
727 					false);
728 			if (ret <= 0)
729 				goto read_err;
730 
731 			if (copy_to_user(buf, &val, sizeof(val)))
732 				goto read_err;
733 
734 			filled = 1;
735 		}
736 
737 		count -= filled;
738 		done += filled;
739 		*ppos += filled;
740 		buf += filled;
741 	}
742 
743 	return done;
744 
745 read_err:
746 	return -EFAULT;
747 }
748 
749 static ssize_t intel_vgpu_write(struct mdev_device *mdev,
750 				const char __user *buf,
751 				size_t count, loff_t *ppos)
752 {
753 	unsigned int done = 0;
754 	int ret;
755 
756 	while (count) {
757 		size_t filled;
758 
759 		if (count >= 4 && !(*ppos % 4)) {
760 			u32 val;
761 
762 			if (copy_from_user(&val, buf, sizeof(val)))
763 				goto write_err;
764 
765 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
766 					ppos, true);
767 			if (ret <= 0)
768 				goto write_err;
769 
770 			filled = 4;
771 		} else if (count >= 2 && !(*ppos % 2)) {
772 			u16 val;
773 
774 			if (copy_from_user(&val, buf, sizeof(val)))
775 				goto write_err;
776 
777 			ret = intel_vgpu_rw(mdev, (char *)&val,
778 					sizeof(val), ppos, true);
779 			if (ret <= 0)
780 				goto write_err;
781 
782 			filled = 2;
783 		} else {
784 			u8 val;
785 
786 			if (copy_from_user(&val, buf, sizeof(val)))
787 				goto write_err;
788 
789 			ret = intel_vgpu_rw(mdev, &val, sizeof(val),
790 					ppos, true);
791 			if (ret <= 0)
792 				goto write_err;
793 
794 			filled = 1;
795 		}
796 
797 		count -= filled;
798 		done += filled;
799 		*ppos += filled;
800 		buf += filled;
801 	}
802 
803 	return done;
804 write_err:
805 	return -EFAULT;
806 }
807 
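/*
 * mmap handler: only the BAR2 (aperture) region may be mapped, and it is
 * remapped directly onto this vGPU's slice of the physical aperture.
 */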
808 static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
809 {
810 	unsigned int index;
811 	u64 virtaddr;
812 	unsigned long req_size, pgoff = 0;
813 	pgprot_t pg_prot;
814 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
815 
816 	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
817 	if (index >= VFIO_PCI_ROM_REGION_INDEX)
818 		return -EINVAL;
819 
820 	if (vma->vm_end < vma->vm_start)
821 		return -EINVAL;
822 	if ((vma->vm_flags & VM_SHARED) == 0)
823 		return -EINVAL;
824 	if (index != VFIO_PCI_BAR2_REGION_INDEX)
825 		return -EINVAL;
826 
827 	pg_prot = vma->vm_page_prot;
828 	virtaddr = vma->vm_start;
829 	req_size = vma->vm_end - vma->vm_start;
830 	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
831 
832 	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
833 }
834 
835 static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
836 {
837 	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
838 		return 1;
839 
840 	return 0;
841 }
842 
843 static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
844 			unsigned int index, unsigned int start,
845 			unsigned int count, uint32_t flags,
846 			void *data)
847 {
848 	return 0;
849 }
850 
851 static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
852 			unsigned int index, unsigned int start,
853 			unsigned int count, uint32_t flags, void *data)
854 {
855 	return 0;
856 }
857 
858 static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
859 		unsigned int index, unsigned int start, unsigned int count,
860 		uint32_t flags, void *data)
861 {
862 	return 0;
863 }
864 
865 static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
866 		unsigned int index, unsigned int start, unsigned int count,
867 		uint32_t flags, void *data)
868 {
869 	struct eventfd_ctx *trigger;
870 
871 	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
872 		int fd = *(int *)data;
873 
874 		trigger = eventfd_ctx_fdget(fd);
875 		if (IS_ERR(trigger)) {
876 			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
877 			return PTR_ERR(trigger);
878 		}
879 		vgpu->vdev.msi_trigger = trigger;
880 	}
881 
882 	return 0;
883 }
884 
885 static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
886 		unsigned int index, unsigned int start, unsigned int count,
887 		void *data)
888 {
889 	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
890 			unsigned int start, unsigned int count, uint32_t flags,
891 			void *data) = NULL;
892 
893 	switch (index) {
894 	case VFIO_PCI_INTX_IRQ_INDEX:
895 		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
896 		case VFIO_IRQ_SET_ACTION_MASK:
897 			func = intel_vgpu_set_intx_mask;
898 			break;
899 		case VFIO_IRQ_SET_ACTION_UNMASK:
900 			func = intel_vgpu_set_intx_unmask;
901 			break;
902 		case VFIO_IRQ_SET_ACTION_TRIGGER:
903 			func = intel_vgpu_set_intx_trigger;
904 			break;
905 		}
906 		break;
907 	case VFIO_PCI_MSI_IRQ_INDEX:
908 		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
909 		case VFIO_IRQ_SET_ACTION_MASK:
910 		case VFIO_IRQ_SET_ACTION_UNMASK:
911 			/* XXX Need masking support exported */
912 			break;
913 		case VFIO_IRQ_SET_ACTION_TRIGGER:
914 			func = intel_vgpu_set_msi_trigger;
915 			break;
916 		}
917 		break;
918 	}
919 
920 	if (!func)
921 		return -ENOTTY;
922 
923 	return func(vgpu, index, start, count, flags, data);
924 }
925 
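/*
 * VFIO device ioctls: report device/region/IRQ info, route
 * VFIO_DEVICE_SET_IRQS to the MSI/INTx helpers above, and handle
 * VFIO_DEVICE_RESET by resetting the vGPU.
 */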
926 static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
927 			     unsigned long arg)
928 {
929 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
930 	unsigned long minsz;
931 
932 	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
933 
934 	if (cmd == VFIO_DEVICE_GET_INFO) {
935 		struct vfio_device_info info;
936 
937 		minsz = offsetofend(struct vfio_device_info, num_irqs);
938 
939 		if (copy_from_user(&info, (void __user *)arg, minsz))
940 			return -EFAULT;
941 
942 		if (info.argsz < minsz)
943 			return -EINVAL;
944 
945 		info.flags = VFIO_DEVICE_FLAGS_PCI;
946 		info.flags |= VFIO_DEVICE_FLAGS_RESET;
947 		info.num_regions = VFIO_PCI_NUM_REGIONS;
948 		info.num_irqs = VFIO_PCI_NUM_IRQS;
949 
950 		return copy_to_user((void __user *)arg, &info, minsz) ?
951 			-EFAULT : 0;
952 
953 	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
954 		struct vfio_region_info info;
955 		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
956 		int i, ret;
957 		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
958 		size_t size;
959 		int nr_areas = 1;
960 		int cap_type_id;
961 
962 		minsz = offsetofend(struct vfio_region_info, offset);
963 
964 		if (copy_from_user(&info, (void __user *)arg, minsz))
965 			return -EFAULT;
966 
967 		if (info.argsz < minsz)
968 			return -EINVAL;
969 
970 		switch (info.index) {
971 		case VFIO_PCI_CONFIG_REGION_INDEX:
972 			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
973 			info.size = INTEL_GVT_MAX_CFG_SPACE_SZ;
974 			info.flags = VFIO_REGION_INFO_FLAG_READ |
975 				     VFIO_REGION_INFO_FLAG_WRITE;
976 			break;
977 		case VFIO_PCI_BAR0_REGION_INDEX:
978 			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
979 			info.size = vgpu->cfg_space.bar[info.index].size;
980 			if (!info.size) {
981 				info.flags = 0;
982 				break;
983 			}
984 
985 			info.flags = VFIO_REGION_INFO_FLAG_READ |
986 				     VFIO_REGION_INFO_FLAG_WRITE;
987 			break;
988 		case VFIO_PCI_BAR1_REGION_INDEX:
989 			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
990 			info.size = 0;
991 			info.flags = 0;
992 			break;
993 		case VFIO_PCI_BAR2_REGION_INDEX:
994 			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
995 			info.flags = VFIO_REGION_INFO_FLAG_CAPS |
996 					VFIO_REGION_INFO_FLAG_MMAP |
997 					VFIO_REGION_INFO_FLAG_READ |
998 					VFIO_REGION_INFO_FLAG_WRITE;
999 			info.size = gvt_aperture_sz(vgpu->gvt);
1000 
1001 			size = sizeof(*sparse) +
1002 					(nr_areas * sizeof(*sparse->areas));
1003 			sparse = kzalloc(size, GFP_KERNEL);
1004 			if (!sparse)
1005 				return -ENOMEM;
1006 
1007 			sparse->nr_areas = nr_areas;
1008 			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
1009 			sparse->areas[0].offset =
1010 					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
1011 			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
1012 			break;
1013 
1014 		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
1015 			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1016 			info.size = 0;
1017 
1018 			info.flags = 0;
1019 			gvt_dbg_core("get region info bar:%d\n", info.index);
1020 			break;
1021 
1022 		case VFIO_PCI_ROM_REGION_INDEX:
1023 		case VFIO_PCI_VGA_REGION_INDEX:
1024 			gvt_dbg_core("get region info index:%d\n", info.index);
1025 			break;
1026 		default:
1027 			{
1028 				struct vfio_region_info_cap_type cap_type;
1029 
1030 				if (info.index >= VFIO_PCI_NUM_REGIONS +
1031 						vgpu->vdev.num_regions)
1032 					return -EINVAL;
1033 
1034 				i = info.index - VFIO_PCI_NUM_REGIONS;
1035 
1036 				info.offset =
1037 					VFIO_PCI_INDEX_TO_OFFSET(info.index);
1038 				info.size = vgpu->vdev.region[i].size;
1039 				info.flags = vgpu->vdev.region[i].flags;
1040 
1041 				cap_type.type = vgpu->vdev.region[i].type;
1042 				cap_type.subtype = vgpu->vdev.region[i].subtype;
1043 
1044 				ret = vfio_info_add_capability(&caps,
1045 						VFIO_REGION_INFO_CAP_TYPE,
1046 						&cap_type);
1047 				if (ret)
1048 					return ret;
1049 			}
1050 		}
1051 
1052 		if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
1053 			switch (cap_type_id) {
1054 			case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
1055 				ret = vfio_info_add_capability(&caps,
1056 					VFIO_REGION_INFO_CAP_SPARSE_MMAP,
1057 					sparse);
1058 				kfree(sparse);
1059 				if (ret)
1060 					return ret;
1061 				break;
1062 			default:
1063 				return -EINVAL;
1064 			}
1065 		}
1066 
1067 		if (caps.size) {
1068 			if (info.argsz < sizeof(info) + caps.size) {
1069 				info.argsz = sizeof(info) + caps.size;
1070 				info.cap_offset = 0;
1071 			} else {
1072 				vfio_info_cap_shift(&caps, sizeof(info));
1073 				if (copy_to_user((void __user *)arg +
1074 						  sizeof(info), caps.buf,
1075 						  caps.size)) {
1076 					kfree(caps.buf);
1077 					return -EFAULT;
1078 				}
1079 				info.cap_offset = sizeof(info);
1080 			}
1081 
1082 			kfree(caps.buf);
1083 		}
1084 
1085 		return copy_to_user((void __user *)arg, &info, minsz) ?
1086 			-EFAULT : 0;
1087 	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
1088 		struct vfio_irq_info info;
1089 
1090 		minsz = offsetofend(struct vfio_irq_info, count);
1091 
1092 		if (copy_from_user(&info, (void __user *)arg, minsz))
1093 			return -EFAULT;
1094 
1095 		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
1096 			return -EINVAL;
1097 
1098 		switch (info.index) {
1099 		case VFIO_PCI_INTX_IRQ_INDEX:
1100 		case VFIO_PCI_MSI_IRQ_INDEX:
1101 			break;
1102 		default:
1103 			return -EINVAL;
1104 		}
1105 
1106 		info.flags = VFIO_IRQ_INFO_EVENTFD;
1107 
1108 		info.count = intel_vgpu_get_irq_count(vgpu, info.index);
1109 
1110 		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
1111 			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
1112 				       VFIO_IRQ_INFO_AUTOMASKED);
1113 		else
1114 			info.flags |= VFIO_IRQ_INFO_NORESIZE;
1115 
1116 		return copy_to_user((void __user *)arg, &info, minsz) ?
1117 			-EFAULT : 0;
1118 	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
1119 		struct vfio_irq_set hdr;
1120 		u8 *data = NULL;
1121 		int ret = 0;
1122 		size_t data_size = 0;
1123 
1124 		minsz = offsetofend(struct vfio_irq_set, count);
1125 
1126 		if (copy_from_user(&hdr, (void __user *)arg, minsz))
1127 			return -EFAULT;
1128 
1129 		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
1130 			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
1131 
1132 			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
1133 						VFIO_PCI_NUM_IRQS, &data_size);
1134 			if (ret) {
1135 				gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
1136 				return -EINVAL;
1137 			}
1138 			if (data_size) {
1139 				data = memdup_user((void __user *)(arg + minsz),
1140 						   data_size);
1141 				if (IS_ERR(data))
1142 					return PTR_ERR(data);
1143 			}
1144 		}
1145 
1146 		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
1147 					hdr.start, hdr.count, data);
1148 		kfree(data);
1149 
1150 		return ret;
1151 	} else if (cmd == VFIO_DEVICE_RESET) {
1152 		intel_gvt_ops->vgpu_reset(vgpu);
1153 		return 0;
1154 	}
1155 
1156 	return 0;
1157 }
1158 
1159 static ssize_t
1160 vgpu_id_show(struct device *dev, struct device_attribute *attr,
1161 	     char *buf)
1162 {
1163 	struct mdev_device *mdev = mdev_from_dev(dev);
1164 
1165 	if (mdev) {
1166 		struct intel_vgpu *vgpu = (struct intel_vgpu *)
1167 			mdev_get_drvdata(mdev);
1168 		return sprintf(buf, "%d\n", vgpu->id);
1169 	}
1170 	return sprintf(buf, "\n");
1171 }
1172 
1173 static ssize_t
1174 hw_id_show(struct device *dev, struct device_attribute *attr,
1175 	   char *buf)
1176 {
1177 	struct mdev_device *mdev = mdev_from_dev(dev);
1178 
1179 	if (mdev) {
1180 		struct intel_vgpu *vgpu = (struct intel_vgpu *)
1181 			mdev_get_drvdata(mdev);
1182 		return sprintf(buf, "%u\n",
1183 			       vgpu->shadow_ctx->hw_id);
1184 	}
1185 	return sprintf(buf, "\n");
1186 }
1187 
1188 static DEVICE_ATTR_RO(vgpu_id);
1189 static DEVICE_ATTR_RO(hw_id);
1190 
1191 static struct attribute *intel_vgpu_attrs[] = {
1192 	&dev_attr_vgpu_id.attr,
1193 	&dev_attr_hw_id.attr,
1194 	NULL
1195 };
1196 
1197 static const struct attribute_group intel_vgpu_group = {
1198 	.name = "intel_vgpu",
1199 	.attrs = intel_vgpu_attrs,
1200 };
1201 
1202 static const struct attribute_group *intel_vgpu_groups[] = {
1203 	&intel_vgpu_group,
1204 	NULL,
1205 };
1206 
1207 static const struct mdev_parent_ops intel_vgpu_ops = {
1208 	.supported_type_groups	= intel_vgpu_type_groups,
1209 	.mdev_attr_groups       = intel_vgpu_groups,
1210 	.create			= intel_vgpu_create,
1211 	.remove			= intel_vgpu_remove,
1212 
1213 	.open			= intel_vgpu_open,
1214 	.release		= intel_vgpu_release,
1215 
1216 	.read			= intel_vgpu_read,
1217 	.write			= intel_vgpu_write,
1218 	.mmap			= intel_vgpu_mmap,
1219 	.ioctl			= intel_vgpu_ioctl,
1220 };
1221 
1222 static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
1223 {
1224 	if (!intel_gvt_init_vgpu_type_groups(gvt))
1225 		return -EFAULT;
1226 
1227 	intel_gvt_ops = ops;
1228 
1229 	return mdev_register_device(dev, &intel_vgpu_ops);
1230 }
1231 
1232 static void kvmgt_host_exit(struct device *dev, void *gvt)
1233 {
1234 	intel_gvt_cleanup_vgpu_type_groups(gvt);
1235 	mdev_unregister_device(dev);
1236 }
1237 
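/*
 * Ask KVM to write-protect a guest page so that guest updates to shadowed
 * page tables trap into kvmgt_page_track_write().  The companion
 * kvmgt_write_protect_remove() undoes the protection.
 */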
1238 static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
1239 {
1240 	struct kvmgt_guest_info *info;
1241 	struct kvm *kvm;
1242 	struct kvm_memory_slot *slot;
1243 	int idx;
1244 
1245 	if (!handle_valid(handle))
1246 		return -ESRCH;
1247 
1248 	info = (struct kvmgt_guest_info *)handle;
1249 	kvm = info->kvm;
1250 
1251 	idx = srcu_read_lock(&kvm->srcu);
1252 	slot = gfn_to_memslot(kvm, gfn);
1253 	if (!slot) {
1254 		srcu_read_unlock(&kvm->srcu, idx);
1255 		return -EINVAL;
1256 	}
1257 
1258 	spin_lock(&kvm->mmu_lock);
1259 
1260 	if (kvmgt_gfn_is_write_protected(info, gfn))
1261 		goto out;
1262 
1263 	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1264 	kvmgt_protect_table_add(info, gfn);
1265 
1266 out:
1267 	spin_unlock(&kvm->mmu_lock);
1268 	srcu_read_unlock(&kvm->srcu, idx);
1269 	return 0;
1270 }
1271 
1272 static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
1273 {
1274 	struct kvmgt_guest_info *info;
1275 	struct kvm *kvm;
1276 	struct kvm_memory_slot *slot;
1277 	int idx;
1278 
1279 	if (!handle_valid(handle))
1280 		return 0;
1281 
1282 	info = (struct kvmgt_guest_info *)handle;
1283 	kvm = info->kvm;
1284 
1285 	idx = srcu_read_lock(&kvm->srcu);
1286 	slot = gfn_to_memslot(kvm, gfn);
1287 	if (!slot) {
1288 		srcu_read_unlock(&kvm->srcu, idx);
1289 		return -EINVAL;
1290 	}
1291 
1292 	spin_lock(&kvm->mmu_lock);
1293 
1294 	if (!kvmgt_gfn_is_write_protected(info, gfn))
1295 		goto out;
1296 
1297 	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1298 	kvmgt_protect_table_del(info, gfn);
1299 
1300 out:
1301 	spin_unlock(&kvm->mmu_lock);
1302 	srcu_read_unlock(&kvm->srcu, idx);
1303 	return 0;
1304 }
1305 
1306 static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1307 		const u8 *val, int len,
1308 		struct kvm_page_track_notifier_node *node)
1309 {
1310 	struct kvmgt_guest_info *info = container_of(node,
1311 					struct kvmgt_guest_info, track_node);
1312 
1313 	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
1314 		intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
1315 					(void *)val, len);
1316 }
1317 
1318 static void kvmgt_page_track_flush_slot(struct kvm *kvm,
1319 		struct kvm_memory_slot *slot,
1320 		struct kvm_page_track_notifier_node *node)
1321 {
1322 	int i;
1323 	gfn_t gfn;
1324 	struct kvmgt_guest_info *info = container_of(node,
1325 					struct kvmgt_guest_info, track_node);
1326 
1327 	spin_lock(&kvm->mmu_lock);
1328 	for (i = 0; i < slot->npages; i++) {
1329 		gfn = slot->base_gfn + i;
1330 		if (kvmgt_gfn_is_write_protected(info, gfn)) {
1331 			kvm_slot_page_track_remove_page(kvm, slot, gfn,
1332 						KVM_PAGE_TRACK_WRITE);
1333 			kvmgt_protect_table_del(info, gfn);
1334 		}
1335 	}
1336 	spin_unlock(&kvm->mmu_lock);
1337 }
1338 
1339 static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
1340 {
1341 	struct intel_vgpu *itr;
1342 	struct kvmgt_guest_info *info;
1343 	int id;
1344 	bool ret = false;
1345 
1346 	mutex_lock(&vgpu->gvt->lock);
1347 	for_each_active_vgpu(vgpu->gvt, itr, id) {
1348 		if (!handle_valid(itr->handle))
1349 			continue;
1350 
1351 		info = (struct kvmgt_guest_info *)itr->handle;
1352 		if (kvm && kvm == info->kvm) {
1353 			ret = true;
1354 			goto out;
1355 		}
1356 	}
1357 out:
1358 	mutex_unlock(&vgpu->gvt->lock);
1359 	return ret;
1360 }
1361 
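/*
 * Bind the vGPU to the KVM instance delivered through the group notifier:
 * take a reference on the kvm, set up the protect table and gfn->iova
 * cache, and register the page-track notifier used for write protection.
 */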
1362 static int kvmgt_guest_init(struct mdev_device *mdev)
1363 {
1364 	struct kvmgt_guest_info *info;
1365 	struct intel_vgpu *vgpu;
1366 	struct kvm *kvm;
1367 
1368 	vgpu = mdev_get_drvdata(mdev);
1369 	if (handle_valid(vgpu->handle))
1370 		return -EEXIST;
1371 
1372 	kvm = vgpu->vdev.kvm;
1373 	if (!kvm || kvm->mm != current->mm) {
1374 		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
1375 		return -ESRCH;
1376 	}
1377 
1378 	if (__kvmgt_vgpu_exist(vgpu, kvm))
1379 		return -EEXIST;
1380 
1381 	info = vzalloc(sizeof(struct kvmgt_guest_info));
1382 	if (!info)
1383 		return -ENOMEM;
1384 
1385 	vgpu->handle = (unsigned long)info;
1386 	info->vgpu = vgpu;
1387 	info->kvm = kvm;
1388 	kvm_get_kvm(info->kvm);
1389 
1390 	kvmgt_protect_table_init(info);
1391 	gvt_cache_init(vgpu);
1392 
1393 	info->track_node.track_write = kvmgt_page_track_write;
1394 	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
1395 	kvm_page_track_register_notifier(kvm, &info->track_node);
1396 
1397 	return 0;
1398 }
1399 
1400 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1401 {
1402 	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1403 	kvm_put_kvm(info->kvm);
1404 	kvmgt_protect_table_destroy(info);
1405 	gvt_cache_destroy(info->vgpu);
1406 	vfree(info);
1407 
1408 	return true;
1409 }
1410 
1411 static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
1412 {
1413 	/* nothing to do here */
1414 	return 0;
1415 }
1416 
1417 static void kvmgt_detach_vgpu(unsigned long handle)
1418 {
1419 	/* nothing to do here */
1420 }
1421 
1422 static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
1423 {
1424 	struct kvmgt_guest_info *info;
1425 	struct intel_vgpu *vgpu;
1426 
1427 	if (!handle_valid(handle))
1428 		return -ESRCH;
1429 
1430 	info = (struct kvmgt_guest_info *)handle;
1431 	vgpu = info->vgpu;
1432 
1433 	if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
1434 		return 0;
1435 
1436 	return -EFAULT;
1437 }
1438 
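/*
 * Translate a guest frame number into a host address usable for GPU DMA:
 * return the cached iova if present, otherwise pin the page through VFIO,
 * map it through the IOMMU and cache the result.
 */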
1439 static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1440 {
1441 	unsigned long iova, pfn;
1442 	struct kvmgt_guest_info *info;
1443 	struct device *dev;
1444 	struct intel_vgpu *vgpu;
1445 	int rc;
1446 
1447 	if (!handle_valid(handle))
1448 		return INTEL_GVT_INVALID_ADDR;
1449 
1450 	info = (struct kvmgt_guest_info *)handle;
1451 	vgpu = info->vgpu;
1452 	iova = gvt_cache_find(info->vgpu, gfn);
1453 	if (iova != INTEL_GVT_INVALID_ADDR)
1454 		return iova;
1455 
1456 	pfn = INTEL_GVT_INVALID_ADDR;
1457 	dev = mdev_dev(info->vgpu->vdev.mdev);
1458 	rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
1459 	if (rc != 1) {
1460 		gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
1461 			gfn, rc);
1462 		return INTEL_GVT_INVALID_ADDR;
1463 	}
1464 	/* map to a host IOVA so the GPU can use it for DMA */
1465 	rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
1466 	if (rc) {
1467 		gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
1468 		vfio_unpin_pages(dev, &gfn, 1);
1469 		return INTEL_GVT_INVALID_ADDR;
1470 	}
1471 
1472 	gvt_cache_add(info->vgpu, gfn, iova);
1473 	return iova;
1474 }
1475 
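/*
 * Read or write guest physical memory on behalf of device emulation.
 * When called from a kernel thread (no mm), temporarily adopt the guest's
 * mm with use_mm() so kvm_read_guest()/kvm_write_guest() can access it.
 */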
1476 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1477 			void *buf, unsigned long len, bool write)
1478 {
1479 	struct kvmgt_guest_info *info;
1480 	struct kvm *kvm;
1481 	int idx, ret;
1482 	bool kthread = current->mm == NULL;
1483 
1484 	if (!handle_valid(handle))
1485 		return -ESRCH;
1486 
1487 	info = (struct kvmgt_guest_info *)handle;
1488 	kvm = info->kvm;
1489 
1490 	if (kthread)
1491 		use_mm(kvm->mm);
1492 
1493 	idx = srcu_read_lock(&kvm->srcu);
1494 	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
1495 		      kvm_read_guest(kvm, gpa, buf, len);
1496 	srcu_read_unlock(&kvm->srcu, idx);
1497 
1498 	if (kthread)
1499 		unuse_mm(kvm->mm);
1500 
1501 	return ret;
1502 }
1503 
1504 static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
1505 			void *buf, unsigned long len)
1506 {
1507 	return kvmgt_rw_gpa(handle, gpa, buf, len, false);
1508 }
1509 
1510 static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
1511 			void *buf, unsigned long len)
1512 {
1513 	return kvmgt_rw_gpa(handle, gpa, buf, len, true);
1514 }
1515 
1516 static unsigned long kvmgt_virt_to_pfn(void *addr)
1517 {
1518 	return PFN_DOWN(__pa(addr));
1519 }
1520 
1521 struct intel_gvt_mpt kvmgt_mpt = {
1522 	.host_init = kvmgt_host_init,
1523 	.host_exit = kvmgt_host_exit,
1524 	.attach_vgpu = kvmgt_attach_vgpu,
1525 	.detach_vgpu = kvmgt_detach_vgpu,
1526 	.inject_msi = kvmgt_inject_msi,
1527 	.from_virt_to_mfn = kvmgt_virt_to_pfn,
1528 	.set_wp_page = kvmgt_write_protect_add,
1529 	.unset_wp_page = kvmgt_write_protect_remove,
1530 	.read_gpa = kvmgt_read_gpa,
1531 	.write_gpa = kvmgt_write_gpa,
1532 	.gfn_to_mfn = kvmgt_gfn_to_pfn,
1533 };
1534 EXPORT_SYMBOL_GPL(kvmgt_mpt);
1535 
1536 static int __init kvmgt_init(void)
1537 {
1538 	return 0;
1539 }
1540 
1541 static void __exit kvmgt_exit(void)
1542 {
1543 }
1544 
1545 module_init(kvmgt_init);
1546 module_exit(kvmgt_exit);
1547 
1548 MODULE_LICENSE("GPL and additional rights");
1549 MODULE_AUTHOR("Intel Corporation");
1550