// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/mm.h>
#include <linux/sync_file.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-mapping.h>

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include <drm/lima_drm.h>

#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_vm.h"

/*
 * Grow the shmem backing of a heap BO: start at lima_heap_init_nr_pages
 * pages, double the size on each call (capped at the BO size), rebuild the
 * sg table for the enlarged range and map the new pages into @vm when one
 * is given.
 */
int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
{
	struct page **pages;
	struct address_space *mapping = bo->base.base.filp->f_mapping;
	struct device *dev = bo->base.base.dev->dev;
	size_t old_size = bo->heap_size;
	size_t new_size = bo->heap_size ? bo->heap_size * 2 :
		(lima_heap_init_nr_pages << PAGE_SHIFT);
	struct sg_table sgt;
	int i, ret;

	if (bo->heap_size >= bo->base.base.size)
		return -ENOSPC;

	new_size = min(new_size, bo->base.base.size);

	mutex_lock(&bo->base.pages_lock);

	if (bo->base.pages) {
		pages = bo->base.pages;
	} else {
		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			mutex_unlock(&bo->base.pages_lock);
			return -ENOMEM;
		}

		bo->base.pages = pages;
		bo->base.pages_use_count = 1;

		mapping_set_unevictable(mapping);
	}

	for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);

		if (IS_ERR(page)) {
			mutex_unlock(&bo->base.pages_lock);
			return PTR_ERR(page);
		}
		pages[i] = page;
	}

	mutex_unlock(&bo->base.pages_lock);

	ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
					new_size, GFP_KERNEL);
	if (ret)
		return ret;

	if (bo->base.sgt) {
		dma_unmap_sg(dev, bo->base.sgt->sgl,
			     bo->base.sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(bo->base.sgt);
	} else {
		bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
		if (!bo->base.sgt) {
			sg_free_table(&sgt);
			return -ENOMEM;
		}
	}

	dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL);

	*bo->base.sgt = sgt;

	if (vm) {
		ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
		if (ret)
			return ret;
	}

	bo->heap_size = new_size;
	return 0;
}

int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
			   u32 size, u32 flags, u32 *handle)
{
	int err;
	gfp_t mask;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	bool is_heap = flags & LIMA_BO_FLAG_HEAP;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	obj = &shmem->base;

	/* Mali Utgard GPU can only support a 32-bit address space */
	mask = mapping_gfp_mask(obj->filp->f_mapping);
	mask &= ~__GFP_HIGHMEM;
	mask |= __GFP_DMA32;
	mapping_set_gfp_mask(obj->filp->f_mapping, mask);

	if (is_heap) {
		bo = to_lima_bo(obj);
		err = lima_heap_alloc(bo, NULL);
		if (err)
			goto out;
	} else {
		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);

		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto out;
		}
	}

	err = drm_gem_handle_create(file, obj, handle);

out:
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return err;
}

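/*
 * Free callback: by the time the last reference to a BO is dropped, all of
 * its GPU VA mappings should already be gone, so complain if the va list is
 * still populated before handing the object back to the shmem helpers.
 */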
static void lima_gem_free_object(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (!list_empty(&bo->va))
		dev_err(obj->dev->dev, "lima gem free bo still has va\n");

	drm_gem_shmem_free_object(obj);
}

static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	return lima_vm_bo_add(vm, bo, true);
}

static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	lima_vm_bo_del(vm, bo);
}

static int lima_gem_pin(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_pin(obj);
}

static void *lima_gem_vmap(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return ERR_PTR(-EINVAL);

	return drm_gem_shmem_vmap(obj);
}

static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_mmap(obj, vma);
}

static const struct drm_gem_object_funcs lima_gem_funcs = {
	.free = lima_gem_free_object,
	.open = lima_gem_object_open,
	.close = lima_gem_object_close,
	.print_info = drm_gem_shmem_print_info,
	.pin = lima_gem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = lima_gem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = lima_gem_mmap,
};

struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
{
	struct lima_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	mutex_init(&bo->lock);
	INIT_LIST_HEAD(&bo->va);

	bo->base.base.funcs = &lima_gem_funcs;

	return &bo->base.base;
}

int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	bo = to_lima_bo(obj);

	*va = lima_vm_get_va(vm, bo);

	*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_put_unlocked(obj);
	return 0;
}

static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
			    bool write, bool explicit)
{
	int err = 0;

	if (!write) {
		err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
		if (err)
			return err;
	}

	/* explicit sync uses the user-passed dep fences */
	if (explicit)
		return 0;

	return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
}

static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
		struct dma_fence *fence = NULL;

		if (!submit->in_sync[i])
			continue;

		err = drm_syncobj_find_fence(file, submit->in_sync[i],
					     0, 0, &fence);
		if (err)
			return err;

		err = drm_gem_fence_array_add(&submit->task->deps, fence);
		if (err) {
			dma_fence_put(fence);
			return err;
		}
	}

	return 0;
}

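/*
 * Submit a job: look up every BO, take an extra reference on its GPU VA
 * mapping, lock all reservations, set up the scheduler task, collect the
 * explicit (syncobj) and implicit (reservation) dependencies, queue the
 * task and attach its fence to each BO before dropping the locks.
 */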
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
{
	int i, err = 0;
	struct ww_acquire_ctx ctx;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;
	struct drm_syncobj *out_sync = NULL;
	struct dma_fence *fence;
	struct lima_bo **bos = submit->lbos;

	if (submit->out_sync) {
		out_sync = drm_syncobj_find(file, submit->out_sync);
		if (!out_sync)
			return -ENOENT;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct lima_bo *bo;

		obj = drm_gem_object_lookup(file, submit->bos[i].handle);
		if (!obj) {
			err = -ENOENT;
			goto err_out0;
		}

		bo = to_lima_bo(obj);

		/* increase the refcnt of the gpu va map so it can't be
		 * unmapped while the task executes; decreased when the
		 * task is done
		 */
		err = lima_vm_bo_add(vm, bo, false);
		if (err) {
			drm_gem_object_put_unlocked(obj);
			goto err_out0;
		}

		bos[i] = bo;
	}

	err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
					submit->nr_bos, &ctx);
	if (err)
		goto err_out0;

	err = lima_sched_task_init(
		submit->task, submit->ctx->context + submit->pipe,
		bos, submit->nr_bos, vm);
	if (err)
		goto err_out1;

	err = lima_gem_add_deps(file, submit);
	if (err)
		goto err_out2;

	for (i = 0; i < submit->nr_bos; i++) {
		err = lima_gem_sync_bo(
			submit->task, bos[i],
			submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
			submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
		if (err)
			goto err_out2;
	}

	fence = lima_sched_context_queue_task(
		submit->ctx->context + submit->pipe, submit->task);

	for (i = 0; i < submit->nr_bos; i++) {
		if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
			dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence);
		else
			dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence);
	}

	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);

	for (i = 0; i < submit->nr_bos; i++)
		drm_gem_object_put_unlocked(&bos[i]->base.base);

	if (out_sync) {
		drm_syncobj_replace_fence(out_sync, fence);
		drm_syncobj_put(out_sync);
	}

	dma_fence_put(fence);

	return 0;

err_out2:
	lima_sched_task_fini(submit->task);
err_out1:
	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);
err_out0:
	for (i = 0; i < submit->nr_bos; i++) {
		if (!bos[i])
			break;
		lima_vm_bo_del(vm, bos[i]);
		drm_gem_object_put_unlocked(&bos[i]->base.base);
	}
	if (out_sync)
		drm_syncobj_put(out_sync);
	return err;
}

int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
{
	bool write = op & LIMA_GEM_WAIT_WRITE;
	long ret, timeout;

	if (!op)
		return 0;

	timeout = drm_timeout_abs_to_jiffies(timeout_ns);

	ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
	if (ret == -ETIME)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

	return ret;
}