// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"

#include <linux/dma-mapping.h>

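/*
 * GEM free callback for VRAM (host-visible blob) objects.  If the blob
 * was created on the host, undo the host-visible mapping (if one is
 * still allocated) before releasing the host resource.
 */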
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	bool unmap;

	if (bo->created) {
		spin_lock(&vgdev->host_visible_lock);
		unmap = drm_mm_node_allocated(&vram->vram_node);
		spin_unlock(&vgdev->host_visible_lock);

		if (unmap)
			virtio_gpu_cmd_unmap(vgdev, bo);

		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		return;
	}
}

static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

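/*
 * mmap a host-visible blob: the object must have been created with
 * VIRTGPU_BLOB_FLAG_USE_MAPPABLE and the host mapping must have
 * succeeded.  Only mappings covering the whole blob are allowed, and the
 * caching mode follows the map_info reported by the host.
 *
 * Rough userspace sketch (illustrative, not part of this file): create a
 * blob with VIRTGPU_BLOB_FLAG_USE_MAPPABLE via
 * DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, look up the fake mmap offset
 * with DRM_IOCTL_VIRTGPU_MAP, then mmap() the DRM fd at that offset with
 * a length equal to the blob size.
 */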
static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
				struct vm_area_struct *vma)
{
	int ret;
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	unsigned long vm_size = vma->vm_end - vma->vm_start;

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
		return -EINVAL;

	wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
	if (vram->map_state != STATE_OK)
		return -EINVAL;

	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	vma->vm_ops = &virtio_gpu_vram_vm_ops;

	if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Partial mappings of GEM buffers don't happen much in practice. */
	if (vm_size != vram->vram_node.size)
		return -EINVAL;

	ret = io_remap_pfn_range(vma, vma->vm_start,
				 vram->vram_node.start >> PAGE_SHIFT,
				 vm_size, vma->vm_page_prot);
	return ret;
}

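/*
 * dma-buf map callback.  For mappable blobs this hands out a
 * single-entry sg_table covering the blob's window in the host-visible
 * region, mapped with dma_map_resource().  Non-mappable blobs get a stub
 * (empty) sg_table instead: virtio devices can still reach the buffer
 * through its UUID.
 */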
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
					     struct device *dev,
					     enum dma_data_direction dir)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	struct sg_table *sgt;
	dma_addr_t addr;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
		// Virtio devices can access the dma-buf via its UUID. Return a stub
		// sg_table so the dma-buf API still works.
		if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
			ret = -EIO;
			goto out;
		}
		return sgt;
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret)
		goto out;

	addr = dma_map_resource(dev, vram->vram_node.start,
				vram->vram_node.size, dir,
				DMA_ATTR_SKIP_CPU_SYNC);
	ret = dma_mapping_error(dev, addr);
	if (ret)
		goto out;

	sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
	sg_dma_address(sgt->sgl) = addr;
	sg_dma_len(sgt->sgl) = vram->vram_node.size;

	return sgt;
out:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(ret);
}

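/*
 * dma-buf unmap callback: undo the dma_map_resource() mapping, if any.
 * Stub tables handed out for non-mappable blobs have nents == 0 and only
 * need to be freed.
 */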
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
				   struct sg_table *sgt,
				   enum dma_data_direction dir)
{
	if (sgt->nents) {
		dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
				   sg_dma_len(sgt->sgl), dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}
	sg_free_table(sgt);
	kfree(sgt);
}

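/*
 * virtio_gpu_is_vram() identifies VRAM objects by this funcs table, so
 * every object created through virtio_gpu_vram_create() must use it.
 */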
static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.free = virtio_gpu_vram_free,
	.mmap = virtio_gpu_vram_mmap,
	.export = virtgpu_gem_prime_export,
};

bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_vram_funcs;
}

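/*
 * Reserve a window for the blob in the host-visible region and ask the
 * host to map the resource there.  The offset sent to the host is
 * relative to the start of the host-visible region; the drm_mm node is
 * released again if the map command cannot be queued.
 */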
static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
{
	int ret;
	uint64_t offset;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

	if (!vgdev->has_host_visible)
		return -EINVAL;

	spin_lock(&vgdev->host_visible_lock);
	ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
				 bo->base.base.size);
	spin_unlock(&vgdev->host_visible_lock);

	if (ret)
		return ret;

	objs = virtio_gpu_array_alloc(1);
	if (!objs) {
		ret = -ENOMEM;
		goto err_remove_node;
	}

	virtio_gpu_array_add_obj(objs, &bo->base.base);
	/* TODO: Add an error checking helper function in drm_mm.h */
	offset = vram->vram_node.start - vgdev->host_visible_region.addr;

	ret = virtio_gpu_cmd_map(vgdev, objs, offset);
	if (ret) {
		virtio_gpu_array_put_free(objs);
		goto err_remove_node;
	}

	return 0;

err_remove_node:
	spin_lock(&vgdev->host_visible_lock);
	drm_mm_remove_node(&vram->vram_node);
	spin_unlock(&vgdev->host_visible_lock);
	return ret;
}

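/*
 * Create a VRAM (blob) object: allocate a resource id and fake mmap
 * offset, issue the blob-create command, and for mappable blobs map the
 * resource into the host-visible region right away.
 */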
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_object_params *params,
			   struct virtio_gpu_object **bo_ptr)
{
	struct drm_gem_object *obj;
	struct virtio_gpu_object_vram *vram;
	int ret;

	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
	if (!vram)
		return -ENOMEM;

	obj = &vram->base.base.base;
	obj->funcs = &virtio_gpu_vram_funcs;

	params->size = PAGE_ALIGN(params->size);
	drm_gem_private_object_init(vgdev->ddev, obj, params->size);

	/* Create fake offset */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		kfree(vram);
		return ret;
	}

	ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
	if (ret) {
		kfree(vram);
		return ret;
	}

	virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
					    0);
	if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
		ret = virtio_gpu_vram_map(&vram->base);
		if (ret) {
			virtio_gpu_vram_free(obj);
			return ret;
		}
	}

	*bo_ptr = &vram->base;
	return 0;
}