/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				      uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);
		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround) {
		ida_free(&vgdev->resource_ida, id - 1);
	}
}
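
/*
 * Return the resource ID to the allocator and tear down the guest-side
 * backing storage of @bo.  For shmem objects this undoes
 * virtio_gpu_object_shmem_init() in reverse order: unmap the sgtable
 * from the DMA API (if it was mapped), free the table, unpin the GEM
 * pages, and finally free the GEM object itself.  Runs either directly
 * from virtio_gpu_free_object() for objects the host never created, or
 * from the virtqueue completion handler once the host has dropped the
 * resource.
 */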
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				dma_unmap_sgtable(vgdev->vdev->dev.parent,
						  shmem->pages, DMA_TO_DEVICE, 0);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			kfree(shmem->pages);
			shmem->pages = NULL;
			drm_gem_shmem_unpin(&bo->base.base);
		}

		drm_gem_shmem_free_object(&bo->base.base);
	}
}

static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,

	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	dshmem->map_cached = true;
	return &dshmem->base;
}

static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base.base);
	if (ret < 0)
		return -EINVAL;

	/*
	 * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
	 * drm_gem_shmem_get_pages_sgt because virtio has its own set of
	 * dma-ops.  This is discouraged for other drivers, but should be
	 * fine since virtio_gpu doesn't support dma-buf import from other
	 * devices.
	 */
	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (!shmem->pages) {
		drm_gem_shmem_unpin(&bo->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		ret = dma_map_sgtable(vgdev->vdev->dev.parent,
				      shmem->pages, DMA_TO_DEVICE, 0);
		if (ret)
			return ret;
		*nents = shmem->mapped = shmem->pages->nents;
	} else {
		*nents = shmem->pages->orig_nents;
	}

	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
			      GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	if (use_dma_api) {
		for_each_sgtable_dma_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
			(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
			(*ents)[si].padding = 0;
		}
	} else {
		for_each_sgtable_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
			(*ents)[si].length = cpu_to_le32(sg->length);
			(*ents)[si].padding = 0;
		}
	}

	return 0;
}
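
/*
 * Allocate a shmem GEM object, reserve a resource ID and queue a
 * resource-create command (3D when @params->virgl is set, 2D otherwise),
 * then pin the backing pages and attach them to the host resource.
 * When @fence is non-NULL the create command is fenced, so callers can
 * wait for the host to finish resource creation.  On success *@bo_ptr
 * points at the new object; on failure everything allocated here is
 * rolled back.
 */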
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
	}

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	virtio_gpu_object_attach(vgdev, bo, ents, nents);

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ret;
}
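
/*
 * Example usage (an illustrative sketch, not code from this driver): a
 * typical caller such as the dumb-buffer create path fills in the params
 * and lets virtio_gpu_object_create() do the rest; "args" below is a
 * hypothetical stand-in for the caller's request:
 *
 *	struct virtio_gpu_object_params params = { 0 };
 *	struct virtio_gpu_object *obj;
 *	int ret;
 *
 *	params.size = args->size;
 *	params.dumb = true;
 *	ret = virtio_gpu_object_create(vgdev, &params, &obj, NULL);
 *	if (ret)
 *		return ret;
 *
 * On success, obj->hw_res_handle identifies the resource on the host.
 */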