// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/set_memory.h>
#include <linux/xarray.h>

#include <drm/drm_cache.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

MODULE_IMPORT_NS(DMA_BUF);

static const struct drm_gem_object_funcs ivpu_gem_funcs;

static struct lock_class_key prime_bo_lock_class_key;

static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
	return 0;
}

static void prime_free_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
}

static int prime_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;

	WARN_ON(!bo->base.import_attach);

	sgt = dma_buf_map_attachment(bo->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
		return PTR_ERR(sgt);
	}

	bo->sgt = sgt;
	return 0;
}

static void prime_unmap_pages_locked(struct ivpu_bo *bo)
{
	WARN_ON(!bo->base.import_attach);

	dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops prime_ops = {
	.type = IVPU_BO_TYPE_PRIME,
	.name = "prime",
	.alloc_pages = prime_alloc_pages_locked,
	.free_pages = prime_free_pages_locked,
	.map_pages = prime_map_pages_locked,
	.unmap_pages = prime_unmap_pages_locked,
};

static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
{
	int npages = bo->base.size >> PAGE_SHIFT;
	struct page **pages;

	pages = drm_gem_get_pages(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	if (bo->flags & DRM_IVPU_BO_WC)
		set_pages_array_wc(pages, npages);
	else if (bo->flags & DRM_IVPU_BO_UNCACHED)
		set_pages_array_uc(pages, npages);

	bo->pages = pages;
	return 0;
}

static void shmem_free_pages_locked(struct ivpu_bo *bo)
{
	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);

	drm_gem_put_pages(&bo->base, bo->pages, true, false);
	bo->pages = NULL;
}

static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
{
	int npages = bo->base.size >> PAGE_SHIFT;
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;
	int ret;

	sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to allocate sgtable\n");
		return PTR_ERR(sgt);
	}

	ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
		goto err_free_sgt;
	}

	bo->sgt = sgt;
	return 0;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
	return ret;
}

static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops shmem_ops = {
	.type = IVPU_BO_TYPE_SHMEM,
	.name = "shmem",
	.alloc_pages = shmem_alloc_pages_locked,
	.free_pages = shmem_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};
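
/*
 * Each BO type provides an ivpu_bo_ops vtable: prime BOs (above) are backed
 * by an imported dma-buf, shmem BOs by drm_gem_get_pages(), and internal BOs
 * (below) by pages allocated directly with alloc_page(). All of the callbacks
 * are invoked with bo->lock held.
 */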

static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
	struct page **pages;
	int ret;

	pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto err_free_pages;
		}
		cond_resched();
	}

	bo->pages = pages;
	return 0;

err_free_pages:
	while (i--)
		put_page(pages[i]);
	kvfree(pages);
	return ret;
}

static void internal_free_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++)
		put_page(bo->pages[i]);

	kvfree(bo->pages);
	bo->pages = NULL;
}

static const struct ivpu_bo_ops internal_ops = {
	.type = IVPU_BO_TYPE_INTERNAL,
	.name = "internal",
	.alloc_pages = internal_alloc_pages_locked,
	.free_pages = internal_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};

static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	lockdep_assert_held(&bo->lock);
	drm_WARN_ON(&vdev->drm, bo->sgt);

	ret = bo->ops->alloc_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate pages for BO: %d", ret);
		return ret;
	}

	ret = bo->ops->map_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to map pages for BO: %d", ret);
		goto err_free_pages;
	}
	return ret;

err_free_pages:
	bo->ops->free_pages(bo);
	return ret;
}

static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
{
	mutex_lock(&bo->lock);

	WARN_ON(!bo->sgt);
	bo->ops->unmap_pages(bo);
	WARN_ON(bo->sgt);
	bo->ops->free_pages(bo);
	WARN_ON(bo->pages);

	mutex_unlock(&bo->lock);
}

/*
 * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
 *
 * This function pins physical memory pages, then maps the physical pages
 * to IOMMU address space and finally updates the VPU MMU page tables
 * to allow the VPU to translate VPU address to IOMMU address.
 */
int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->vpu_addr) {
		ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n",
			 bo->ctx->id, bo->handle);
		ret = -EINVAL;
		goto unlock;
	}

	if (!bo->sgt) {
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (ret)
			goto unlock;
	}

	if (!bo->mmu_mapped) {
		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
					       ivpu_bo_is_snooped(bo));
		if (ret) {
			ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
			goto unlock;
		}
		bo->mmu_mapped = true;
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}
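
/*
 * Illustrative caller-side sketch for ivpu_bo_pin() (a sketch only; the real
 * call sites live elsewhere in the driver, e.g. during job submission, and
 * may differ):
 *
 *	ret = ivpu_bo_pin(bo);
 *	if (ret)
 *		return ret;
 *	// bo->vpu_addr now resolves through the VPU MMU to the pinned pages
 */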

static int
ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
		       const struct ivpu_addr_range *range)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	if (!range) {
		if (bo->flags & DRM_IVPU_BO_HIGH_MEM)
			range = &vdev->hw->ranges.user_high;
		else
			range = &vdev->hw->ranges.user_low;
	}

	mutex_lock(&ctx->lock);
	ret = ivpu_mmu_context_insert_node_locked(ctx, range, bo->base.size, &bo->mm_node);
	if (!ret) {
		bo->ctx = ctx;
		bo->vpu_addr = bo->mm_node.start;
		list_add_tail(&bo->ctx_node, &ctx->bo_list);
	}
	mutex_unlock(&ctx->lock);

	return ret;
}

static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct ivpu_mmu_context *ctx = bo->ctx;

	ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
		 ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);

	mutex_lock(&bo->lock);

	if (bo->mmu_mapped) {
		drm_WARN_ON(&vdev->drm, !bo->sgt);
		ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
		bo->mmu_mapped = false;
	}

	mutex_lock(&ctx->lock);
	list_del(&bo->ctx_node);
	bo->vpu_addr = 0;
	bo->ctx = NULL;
	ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
	mutex_unlock(&ctx->lock);

	mutex_unlock(&bo->lock);
}

void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx)
{
	struct ivpu_bo *bo, *tmp;

	list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node)
		ivpu_bo_free_vpu_addr(bo);
}

static struct ivpu_bo *
ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context,
	      u64 size, u32 flags, const struct ivpu_bo_ops *ops,
	      const struct ivpu_addr_range *range, u64 user_ptr)
{
	struct ivpu_bo *bo;
	int ret = 0;

	if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size)))
		return ERR_PTR(-EINVAL);

	switch (flags & DRM_IVPU_BO_CACHE_MASK) {
	case DRM_IVPU_BO_CACHED:
	case DRM_IVPU_BO_UNCACHED:
	case DRM_IVPU_BO_WC:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_init(&bo->lock);
	bo->base.funcs = &ivpu_gem_funcs;
	bo->flags = flags;
	bo->ops = ops;
	bo->user_ptr = user_ptr;

	if (ops->type == IVPU_BO_TYPE_SHMEM)
		ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
	else
		drm_gem_private_object_init(&vdev->drm, &bo->base, size);

	if (ret) {
		ivpu_err(vdev, "Failed to initialize drm object\n");
		goto err_free;
	}

	if (flags & DRM_IVPU_BO_MAPPABLE) {
		ret = drm_gem_create_mmap_offset(&bo->base);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate mmap offset\n");
			goto err_release;
		}
	}

	if (mmu_context) {
		ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
		if (ret) {
			ivpu_err(vdev, "Failed to add BO to context: %d\n", ret);
			goto err_release;
		}
	}

	return bo;

err_release:
	drm_gem_object_release(&bo->base);
err_free:
	kfree(bo);
	return ERR_PTR(ret);
}
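
/*
 * GEM free callback: tears the BO down in the reverse order of setup -
 * kernel vmap, VPU address / MMU mapping, DMA mapping and backing pages,
 * and finally the GEM object itself.
 */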

static void ivpu_bo_free(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	if (bo->ctx)
		ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
			 bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
	else
		ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n",
			 (bool)bo->sgt, bo->mmu_mapped);

	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));

	vunmap(bo->kvaddr);

	if (bo->ctx)
		ivpu_bo_free_vpu_addr(bo);

	if (bo->sgt)
		ivpu_bo_unmap_and_free_pages(bo);

	if (bo->base.import_attach)
		drm_prime_gem_destroy(&bo->base, bo->sgt);

	drm_gem_object_release(&bo->base);

	mutex_destroy(&bo->lock);
	kfree(bo);
}

static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s",
		 bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, bo->ops->name);

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;
		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));

	return 0;
}

static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->sgt)
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);

	mutex_unlock(&bo->lock);

	if (ret)
		return ERR_PTR(ret);

	return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
}

static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	pgoff_t page_offset;
	struct page *page;
	vm_fault_t ret;
	int err;

	mutex_lock(&bo->lock);

	if (!bo->sgt) {
		err = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (err) {
			ret = vmf_error(err);
			goto unlock;
		}
	}

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	if (page_offset >= npages) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = bo->pages[page_offset];
		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}

static const struct vm_operations_struct ivpu_vm_ops = {
	.fault = ivpu_vm_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs ivpu_gem_funcs = {
	.free = ivpu_bo_free,
	.mmap = ivpu_bo_mmap,
	.vm_ops = &ivpu_vm_ops,
	.get_sg_table = ivpu_bo_get_sg_table,
};
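
/*
 * Illustrative userspace sketch of the create ioctl handled below (assumes
 * the DRM_IOCTL_IVPU_BO_CREATE wrapper and the struct drm_ivpu_bo_create
 * layout from the drm/ivpu_accel.h uapi header):
 *
 *	struct drm_ivpu_bo_create args = {
 *		.size  = 4096,
 *		.flags = DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_MAPPABLE,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &args) == 0)
 *		printf("handle %u at VPU address 0x%llx\n",
 *		       args.handle, args.vpu_addr);
 */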

int
ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_bo_create *args = data;
	u64 size = PAGE_ALIGN(args->size);
	struct ivpu_bo *bo;
	int ret;

	if (args->flags & ~DRM_IVPU_BO_FLAGS)
		return -EINVAL;

	if (size == 0)
		return -EINVAL;

	bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
			 bo, file_priv->ctx.id, args->size, args->flags);
		return PTR_ERR(bo);
	}

	ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
	if (!ret) {
		args->vpu_addr = bo->vpu_addr;
		args->handle = bo->handle;
	}

	ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
		 file_priv->ctx.id, bo->vpu_addr, bo->base.size, bo->flags);

	drm_gem_object_put(&bo->base);

	return ret;
}

struct ivpu_bo *
ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
{
	const struct ivpu_addr_range *range;
	struct ivpu_addr_range fixed_range;
	struct ivpu_bo *bo;
	pgprot_t prot;
	int ret;

	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));

	if (vpu_addr) {
		fixed_range.start = vpu_addr;
		fixed_range.end = vpu_addr + size;
		range = &fixed_range;
	} else {
		range = &vdev->hw->ranges.global_low;
	}

	bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
			 bo, vpu_addr, size, flags);
		return NULL;
	}

	ret = ivpu_bo_pin(bo);
	if (ret)
		goto err_put;

	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);

	prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
	bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
	if (!bo->kvaddr) {
		ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
		goto err_put;
	}

	ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
		 bo->vpu_addr, bo->base.size, flags);

	return bo;

err_put:
	drm_gem_object_put(&bo->base);
	return NULL;
}

void ivpu_bo_free_internal(struct ivpu_bo *bo)
{
	drm_gem_object_put(&bo->base);
}
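
/*
 * Illustrative in-kernel usage of the internal BO helpers above (a sketch
 * only; the real users, e.g. firmware and log buffers, live elsewhere in
 * the driver):
 *
 *	struct ivpu_bo *bo;
 *
 *	bo = ivpu_bo_alloc_internal(vdev, 0, SZ_4K, DRM_IVPU_BO_WC);
 *	if (!bo)
 *		return -ENOMEM;
 *
 *	memcpy(bo->kvaddr, data, data_size);	// CPU access via the vmap'ed address
 *	...					// hand bo->vpu_addr to the VPU
 *
 *	ivpu_bo_free_internal(bo);
 */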

struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct dma_buf_attachment *attach;
	struct ivpu_bo *bo;

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(buf);

	bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
		goto err_detach;
	}

	lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);

	bo->base.import_attach = attach;

	return &bo->base;

err_detach:
	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
	return ERR_CAST(bo);
}

int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct drm_ivpu_bo_info *args = data;
	struct drm_gem_object *obj;
	struct ivpu_bo *bo;
	int ret = 0;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	bo = to_ivpu_bo(obj);

	mutex_lock(&bo->lock);

	if (!bo->ctx) {
		ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret);
			goto unlock;
		}
	}

	args->flags = bo->flags;
	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
	args->vpu_addr = bo->vpu_addr;
	args->size = obj->size;
unlock:
	mutex_unlock(&bo->lock);
	drm_gem_object_put(obj);
	return ret;
}

int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_bo_wait *args = data;
	struct drm_gem_object *obj;
	unsigned long timeout;
	long ret;

	timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -EINVAL;

	ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout);
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret > 0) {
		ret = 0;
		args->job_status = to_ivpu_bo(obj)->job_status;
	}

	drm_gem_object_put(obj);

	return ret;
}

static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
	unsigned long dma_refcount = 0;

	if (bo->base.dma_buf && bo->base.dma_buf->file)
		dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);

	drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
		   bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size,
		   kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
}

void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;
	struct ivpu_bo *bo;

	drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
		   "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");

	mutex_lock(&vdev->gctx.lock);
	list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
		ivpu_bo_print_info(bo, p);
	mutex_unlock(&vdev->gctx.lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
		file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
		if (!file_priv)
			continue;

		mutex_lock(&file_priv->ctx.lock);
		list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
			ivpu_bo_print_info(bo, p);
		mutex_unlock(&file_priv->ctx.lock);

		ivpu_file_priv_put(&file_priv);
	}
}

void ivpu_bo_list_print(struct drm_device *dev)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	ivpu_bo_list(dev, &p);
}