// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"

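/*
 * Free callback for VRAM-backed GEM objects.  If the resource was
 * created on the host, tear down any host-visible mapping first and
 * then ask the host to drop the resource.
 */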
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	bool unmap;

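	/*
	 * Only objects that reached the host need teardown commands;
	 * peek at the drm_mm node under the lock to decide whether an
	 * unmap must precede the resource unref.
	 */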
	if (bo->created) {
		spin_lock(&vgdev->host_visible_lock);
		unmap = drm_mm_node_allocated(&vram->vram_node);
		spin_unlock(&vgdev->host_visible_lock);

		if (unmap)
			virtio_gpu_cmd_unmap(vgdev, bo);

		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		return;
	}
}

static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

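/*
 * Map the host-visible window backing a VRAM object into userspace.
 * The placement inside the host-visible region is only known once the
 * host has answered the map request, so the map state is checked
 * before any PTEs are set up.
 */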
static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
				struct vm_area_struct *vma)
{
	int ret;
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	unsigned long vm_size = vma->vm_end - vma->vm_start;

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
		return -EINVAL;

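	/* Wait for the host's reply to the earlier map request. */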
	wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
	if (vram->map_state != STATE_OK)
		return -EINVAL;

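	/*
	 * The fake GEM mmap offset is only used to look up the object;
	 * strip it so the VMA describes a plain mapping of the BO.
	 */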
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	vma->vm_ops = &virtio_gpu_vram_vm_ops;

	if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Partial mappings of GEM buffers don't happen much in practice. */
	if (vm_size != vram->vram_node.size)
		return -EINVAL;

	ret = io_remap_pfn_range(vma, vma->vm_start,
				 vram->vram_node.start >> PAGE_SHIFT,
				 vm_size, vma->vm_page_prot);
	return ret;
}

static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.free = virtio_gpu_vram_free,
	.mmap = virtio_gpu_vram_mmap,
	.export = virtgpu_gem_prime_export,
};

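/* An object is VRAM-backed iff it uses the VRAM object funcs. */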
bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_vram_funcs;
}

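/*
 * Reserve a slice of the host-visible region for the object and ask
 * the host to map the blob's backing memory at that offset.
 */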
static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
{
	int ret;
	uint64_t offset;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

	if (!vgdev->has_host_visible)
		return -EINVAL;

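	/* Carve out an address range from the host-visible window. */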
	spin_lock(&vgdev->host_visible_lock);
	ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
				 bo->base.base.size);
	spin_unlock(&vgdev->host_visible_lock);

	if (ret)
		return ret;

	objs = virtio_gpu_array_alloc(1);
	if (!objs) {
		ret = -ENOMEM;
		goto err_remove_node;
	}

	virtio_gpu_array_add_obj(objs, &bo->base.base);
	/* TODO: Add an error checking helper function in drm_mm.h */
	offset = vram->vram_node.start - vgdev->host_visible_region.addr;

	ret = virtio_gpu_cmd_map(vgdev, objs, offset);
	if (ret) {
		virtio_gpu_array_put_free(objs);
		goto err_remove_node;
	}

	return 0;

err_remove_node:
	spin_lock(&vgdev->host_visible_lock);
	drm_mm_remove_node(&vram->vram_node);
	spin_unlock(&vgdev->host_visible_lock);
	return ret;
}

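/*
 * Create a VRAM-backed blob object: allocate the GEM wrapper and a
 * resource id, issue the blob create command and, for mappable blobs,
 * set up the host-visible mapping right away.
 */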
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_object_params *params,
			   struct virtio_gpu_object **bo_ptr)
{
	struct drm_gem_object *obj;
	struct virtio_gpu_object_vram *vram;
	int ret;

	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
	if (!vram)
		return -ENOMEM;

	obj = &vram->base.base.base;
	obj->funcs = &virtio_gpu_vram_funcs;

	params->size = PAGE_ALIGN(params->size);
	drm_gem_private_object_init(vgdev->ddev, obj, params->size);

	/* Create fake offset */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		kfree(vram);
		return ret;
	}

	ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
	if (ret) {
		kfree(vram);
		return ret;
	}

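	/*
	 * Create the blob resource on the host; mappable blobs are also
	 * mapped into the host-visible region here, so a later mmap()
	 * only has to wait for the host's reply.
	 */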
	virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
					    0);
	if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
		ret = virtio_gpu_vram_map(&vram->base);
		if (ret) {
			virtio_gpu_vram_free(obj);
			return ret;
		}
	}

	*bo_ptr = &vram->base;
	return 0;
}