/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				      uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);

		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround) {
		ida_free(&vgdev->resource_ida, id - 1);
	}
}
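/*
 * Illustrative pairing of the two helpers above (a sketch, not a call
 * site in this file): a caller takes an ID before announcing a resource
 * to the host and releases it once the resource is gone.  Both
 * allocation paths hand out handle + 1 so that resource ID 0 stays
 * unused, which the host side treats as "no resource":
 *
 *	uint32_t resid;
 *	int ret = virtio_gpu_resource_id_get(vgdev, &resid);
 *
 *	if (ret)
 *		return ret;
 *	...
 *	virtio_gpu_resource_id_put(vgdev, resid);
 */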
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				dma_unmap_sg(vgdev->vdev->dev.parent,
					     shmem->pages->sgl, shmem->mapped,
					     DMA_TO_DEVICE);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			kfree(shmem->pages);
			shmem->pages = NULL;
			drm_gem_shmem_unpin(&bo->base.base);
		}

		drm_gem_shmem_free_object(&bo->base.base);
	}
}

static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,

	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	dshmem->map_cached = true;
	return &dshmem->base;
}
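/*
 * How the pieces above fit together (sketch; the actual hookup lives in
 * virtgpu_drv.c): virtio_gpu_create_object() is registered as the
 * driver's ->gem_create_object() callback, so every object the shmem
 * helpers allocate carries virtio_gpu_shmem_funcs.  That is what makes
 * the funcs-pointer comparison in virtio_gpu_is_shmem() a valid type
 * check before to_virtio_gpu_shmem() is used:
 *
 *	static struct drm_driver driver = {
 *		...
 *		.gem_create_object = virtio_gpu_create_object,
 *		...
 *	};
 */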
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base.base);
	if (ret < 0)
		return ret;

	/*
	 * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
	 * drm_gem_shmem_get_pages_sgt because virtio has its own set of
	 * dma-ops.  This is discouraged for other drivers, but should be fine
	 * since virtio_gpu doesn't support dma-buf import from other devices.
	 */
	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (!shmem->pages) {
		drm_gem_shmem_unpin(&bo->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					   shmem->pages->sgl,
					   shmem->pages->nents,
					   DMA_TO_DEVICE);
		*nents = shmem->mapped;
	} else {
		*nents = shmem->pages->nents;
	}

	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
			      GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(shmem->pages->sgl, sg, *nents, si) {
		(*ents)[si].addr = cpu_to_le64(use_dma_api
					       ? sg_dma_address(sg)
					       : sg_phys(sg));
		(*ents)[si].length = cpu_to_le32(sg->length);
		(*ents)[si].padding = 0;
	}
	return 0;
}

int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
	}

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	virtio_gpu_object_attach(vgdev, bo, ents, nents);

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ret;
}
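/*
 * Minimal usage sketch for virtio_gpu_object_create() (an assumed
 * caller, roughly what the dumb-buffer path in virtgpu_gem.c does;
 * field values are illustrative only):
 *
 *	struct virtio_gpu_object_params params = { 0 };
 *	struct virtio_gpu_object *obj;
 *	int ret;
 *
 *	params.width  = args->width;
 *	params.height = args->height;
 *	params.size   = args->size;
 *	params.dumb   = true;
 *	ret = virtio_gpu_object_create(vgdev, &params, &obj, NULL);
 *	if (ret)
 *		return ret;
 *
 * Passing a NULL fence skips the reservation-object locking above; the
 * size is rounded up to a page internally, and *bo_ptr is only set on
 * success.
 */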