// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"

/*
 * GEM .free callback for VRAM (host-visible blob) objects.
 *
 * If the resource was created on the host, tear it down: first unmap the
 * host-visible window (if one was ever allocated), then tell the host to
 * drop the resource, and kick the virtqueue so the commands go out.
 *
 * NOTE(review): this callback does not kfree() the object itself and does
 * not remove vram->vram_node from host_visible_mm here; presumably the
 * final cleanup happens elsewhere once the host acknowledges the unref —
 * confirm against virtio_gpu_cmd_unref_resource()'s completion path.
 */
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	bool unmap;

	if (bo->created) {
		/*
		 * Snapshot, under the lock, whether a host-visible region is
		 * currently allocated; the unmap command itself is issued
		 * outside the spinlock.
		 */
		spin_lock(&vgdev->host_visible_lock);
		unmap = drm_mm_node_allocated(&vram->vram_node);
		spin_unlock(&vgdev->host_visible_lock);

		if (unmap)
			virtio_gpu_cmd_unmap(vgdev, bo);

		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		return;
	}
}

/* Plain refcounting vm_ops; no fault handler — pages are pre-mapped below. */
static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

/*
 * GEM .mmap callback for VRAM objects: map the object's host-visible
 * window into userspace with io_remap_pfn_range().
 *
 * Returns 0 on success, -EINVAL if the blob is not mappable, the host map
 * failed, or the requested size does not cover the whole object.
 */
static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
				struct vm_area_struct *vma)
{
	int ret;
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	unsigned long vm_size = vma->vm_end - vma->vm_start;

	/* Only blobs created with the MAPPABLE flag may be mmap'ed. */
	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
		return -EINVAL;

	/*
	 * Block until the MAP_BLOB request has been answered (map_state
	 * leaves STATE_INITIALIZING), then check it actually succeeded.
	 */
	wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
	if (vram->map_state != STATE_OK)
		return -EINVAL;

	/* Strip the fake GEM offset; the remap below starts at offset 0. */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	vma->vm_ops = &virtio_gpu_vram_vm_ops;

	/* Apply the caching mode the host reported for this mapping. */
	if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Partial mappings of GEM buffers don't happen much in practice. */
	if (vm_size != vram->vram_node.size)
		return -EINVAL;

	ret = io_remap_pfn_range(vma, vma->vm_start,
				 vram->vram_node.start >> PAGE_SHIFT,
				 vm_size, vma->vm_page_prot);
	return ret;
}

static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.free = virtio_gpu_vram_free,
	.mmap = virtio_gpu_vram_mmap,
};

/*
 * A BO is a VRAM object iff it uses this file's funcs table — the funcs
 * pointer doubles as the type tag.
 */
bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_vram_funcs;
}

/*
 * Reserve a chunk of the device's host-visible region for @bo and ask the
 * host to map the blob there.
 *
 * Returns 0 on success; -EINVAL if the device has no host-visible region,
 * -ENOMEM on allocation failure, or the error from drm_mm_insert_node() /
 * virtio_gpu_cmd_map().  On failure the drm_mm node is released again.
 */
static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
{
	int ret;
	uint64_t offset;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

	if (!vgdev->has_host_visible)
		return -EINVAL;

	spin_lock(&vgdev->host_visible_lock);
	ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
				 bo->base.base.size);
	spin_unlock(&vgdev->host_visible_lock);

	if (ret)
		return ret;

	objs = virtio_gpu_array_alloc(1);
	if (!objs) {
		ret = -ENOMEM;
		goto err_remove_node;
	}

	virtio_gpu_array_add_obj(objs, &bo->base.base);
	/*TODO: Add an error checking helper function in drm_mm.h */
	/* Offset of the reserved node within the host-visible region. */
	offset = vram->vram_node.start - vgdev->host_visible_region.addr;

	ret = virtio_gpu_cmd_map(vgdev, objs, offset);
	if (ret) {
		virtio_gpu_array_put_free(objs);
		goto err_remove_node;
	}

	return 0;

err_remove_node:
	spin_lock(&vgdev->host_visible_lock);
	drm_mm_remove_node(&vram->vram_node);
	spin_unlock(&vgdev->host_visible_lock);
	return ret;
}

/*
 * Create a VRAM blob object: allocate the wrapper struct, initialize the
 * GEM object (private — no shmem backing), reserve a resource id, issue the
 * CREATE_BLOB command and, for mappable blobs, map it into the host-visible
 * region.
 *
 * On success *bo_ptr is set and 0 is returned; on error the partially
 * constructed object is torn down and a negative errno is returned.
 *
 * NOTE(review): the early kfree() error paths skip drm_gem_object_release()
 * after drm_gem_private_object_init()/drm_gem_create_mmap_offset() —
 * verify against the DRM GEM lifecycle whether the mmap-offset node needs
 * explicit release here.
 */
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_object_params *params,
			   struct virtio_gpu_object **bo_ptr)
{
	struct drm_gem_object *obj;
	struct virtio_gpu_object_vram *vram;
	int ret;

	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
	if (!vram)
		return -ENOMEM;

	obj = &vram->base.base.base;
	obj->funcs = &virtio_gpu_vram_funcs;
	drm_gem_private_object_init(vgdev->ddev, obj, params->size);

	/* Create fake offset */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		kfree(vram);
		return ret;
	}

	ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
	if (ret) {
		kfree(vram);
		return ret;
	}

	virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
					    0);
	if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
		ret = virtio_gpu_vram_map(&vram->base);
		if (ret) {
			/* Undo host-side creation; bo->created is set now. */
			virtio_gpu_vram_free(obj);
			return ret;
		}
	}

	*bo_ptr = &vram->base;
	return 0;
}