/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				      uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that. virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
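		 *
		 * With the workaround enabled, IDs come from a
		 * monotonically increasing atomic counter and are never
		 * recycled, so virtio_gpu_resource_id_put() below is a
		 * no-op in this mode.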
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);
		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev,
				       uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround)
		ida_free(&vgdev->resource_ida, id - 1);
}

void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				dma_unmap_sg(vgdev->vdev->dev.parent,
					     shmem->pages->sgl, shmem->mapped,
					     DMA_TO_DEVICE);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			shmem->pages = NULL;
			drm_gem_shmem_unpin(&bo->base.base);
		}

		drm_gem_shmem_free_object(&bo->base.base);
	}
}

static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,

	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	bo->base.base.funcs = &virtio_gpu_shmem_funcs;
	bo->base.map_cached = true;
	return &bo->base.base;
}

static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base.base);
	if (ret < 0)
		return ret;

	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (!shmem->pages) {
		drm_gem_shmem_unpin(&bo->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					   shmem->pages->sgl,
					   shmem->pages->nents,
					   DMA_TO_DEVICE);
		*nents = shmem->mapped;
	} else {
		*nents = shmem->pages->nents;
	}

	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
			      GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(shmem->pages->sgl, sg, *nents, si) {
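		/*
		 * Translate each scatterlist entry into a
		 * virtio_gpu_mem_entry the host can consume: a DMA
		 * address when the DMA API is in use, the raw physical
		 * address otherwise.
		 */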
		(*ents)[si].addr = cpu_to_le64(use_dma_api
					       ? sg_dma_address(sg)
					       : sg_phys(sg));
		(*ents)[si].length = cpu_to_le32(sg->length);
		(*ents)[si].padding = 0;
	}
	return 0;
}

int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
	}

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		/*
		 * The create command is already queued, so take the
		 * normal free path: it unrefs the resource and cleans
		 * up the object from the completion handler.
		 */
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
	if (ret != 0) {
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	virtio_gpu_notify(vgdev);
	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ret;
}