// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/set_memory.h>
#include <linux/xarray.h>

#include <drm/drm_cache.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

MODULE_IMPORT_NS(DMA_BUF);

static const struct drm_gem_object_funcs ivpu_gem_funcs;

static struct lock_class_key prime_bo_lock_class_key;

static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
	return 0;
}

static void prime_free_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
}

static int prime_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;

	sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
		return PTR_ERR(sgt);
	}

	bo->sgt = sgt;
	return 0;
}

static void prime_unmap_pages_locked(struct ivpu_bo *bo)
{
	dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops prime_ops = {
	.type = IVPU_BO_TYPE_PRIME,
	.name = "prime",
	.alloc_pages = prime_alloc_pages_locked,
	.free_pages = prime_free_pages_locked,
	.map_pages = prime_map_pages_locked,
	.unmap_pages = prime_unmap_pages_locked,
};

static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
{
	int npages = bo->base.size >> PAGE_SHIFT;
	struct page **pages;

	pages = drm_gem_get_pages(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	if (bo->flags & DRM_IVPU_BO_WC)
		set_pages_array_wc(pages, npages);
	else if (bo->flags & DRM_IVPU_BO_UNCACHED)
		set_pages_array_uc(pages, npages);

	bo->pages = pages;
	return 0;
}

static void shmem_free_pages_locked(struct ivpu_bo *bo)
{
	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);

	drm_gem_put_pages(&bo->base, bo->pages, true, false);
	bo->pages = NULL;
}

static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
{
	int npages = bo->base.size >> PAGE_SHIFT;
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;
	int ret;

	sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to allocate sgtable\n");
		return PTR_ERR(sgt);
	}

	ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
		goto err_free_sgt;
	}

	bo->sgt = sgt;
	return 0;

err_free_sgt:
	kfree(sgt);
	return ret;
}

static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
	bo->sgt = NULL;
}

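/*
 * Each BO backend provides an ivpu_bo_ops with alloc/free/map/unmap page
 * callbacks. Shmem BOs get their pages from drm_gem_get_pages() and reuse
 * the common DMA mapping helpers above.
 */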
static const struct ivpu_bo_ops shmem_ops = {
	.type = IVPU_BO_TYPE_SHMEM,
	.name = "shmem",
	.alloc_pages = shmem_alloc_pages_locked,
	.free_pages = shmem_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};

static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
	struct page **pages;
	int ret;

	pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto err_free_pages;
		}
		cond_resched();
	}

	bo->pages = pages;
	return 0;

err_free_pages:
	while (i--)
		put_page(pages[i]);
	kvfree(pages);
	return ret;
}

static void internal_free_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++)
		put_page(bo->pages[i]);

	kvfree(bo->pages);
	bo->pages = NULL;
}

static const struct ivpu_bo_ops internal_ops = {
	.type = IVPU_BO_TYPE_INTERNAL,
	.name = "internal",
	.alloc_pages = internal_alloc_pages_locked,
	.free_pages = internal_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};

static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	lockdep_assert_held(&bo->lock);
	drm_WARN_ON(&vdev->drm, bo->sgt);

	ret = bo->ops->alloc_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate pages for BO: %d", ret);
		return ret;
	}

	ret = bo->ops->map_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to map pages for BO: %d", ret);
		goto err_free_pages;
	}
	return ret;

err_free_pages:
	bo->ops->free_pages(bo);
	return ret;
}

static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
{
	mutex_lock(&bo->lock);

	WARN_ON(!bo->sgt);
	bo->ops->unmap_pages(bo);
	WARN_ON(bo->sgt);
	bo->ops->free_pages(bo);
	WARN_ON(bo->pages);

	mutex_unlock(&bo->lock);
}

/*
 * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
 *
 * This function pins physical memory pages, then maps the physical pages
 * to IOMMU address space and finally updates the VPU MMU page tables
 * to allow the VPU to translate VPU address to IOMMU address.
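 *
 * The BO must already have a VPU address assigned (see ivpu_bo_alloc_vpu_addr()),
 * otherwise -EINVAL is returned. Already allocated and already MMU-mapped BOs
 * are left untouched, so the function can be called repeatedly. Takes bo->lock
 * internally. Returns 0 on success or a negative errno.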
 */
int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->vpu_addr) {
		ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n",
			 bo->ctx->id, bo->handle);
		ret = -EINVAL;
		goto unlock;
	}

	if (!bo->sgt) {
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (ret)
			goto unlock;
	}

	if (!bo->mmu_mapped) {
		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
					       ivpu_bo_is_snooped(bo));
		if (ret) {
			ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
			goto unlock;
		}
		bo->mmu_mapped = true;
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}

static int
ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
		       const struct ivpu_addr_range *range)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	if (!range) {
		if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
			range = &vdev->hw->ranges.shave;
		else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
			range = &vdev->hw->ranges.dma;
		else
			range = &vdev->hw->ranges.user;
	}

	mutex_lock(&ctx->lock);
	ret = ivpu_mmu_context_insert_node_locked(ctx, range, bo->base.size, &bo->mm_node);
	if (!ret) {
		bo->ctx = ctx;
		bo->vpu_addr = bo->mm_node.start;
		list_add_tail(&bo->ctx_node, &ctx->bo_list);
	}
	mutex_unlock(&ctx->lock);

	return ret;
}

static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct ivpu_mmu_context *ctx = bo->ctx;

	ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
		 ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);

	mutex_lock(&bo->lock);

	if (bo->mmu_mapped) {
		drm_WARN_ON(&vdev->drm, !bo->sgt);
		ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
		bo->mmu_mapped = false;
	}

	mutex_lock(&ctx->lock);
	list_del(&bo->ctx_node);
	bo->vpu_addr = 0;
	bo->ctx = NULL;
	ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
	mutex_unlock(&ctx->lock);

	mutex_unlock(&bo->lock);
}

void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx)
{
	struct ivpu_bo *bo, *tmp;

	list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node)
		ivpu_bo_free_vpu_addr(bo);
}

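/*
 * Common BO constructor: validates size and cache flags, initializes the GEM
 * object (shmem-backed or private) and, when an MMU context is given,
 * reserves a VPU virtual address range for the BO.
 */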
static struct ivpu_bo *
ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context,
	      u64 size, u32 flags, const struct ivpu_bo_ops *ops,
	      const struct ivpu_addr_range *range, u64 user_ptr)
{
	struct ivpu_bo *bo;
	int ret = 0;

	if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size)))
		return ERR_PTR(-EINVAL);

	switch (flags & DRM_IVPU_BO_CACHE_MASK) {
	case DRM_IVPU_BO_CACHED:
	case DRM_IVPU_BO_UNCACHED:
	case DRM_IVPU_BO_WC:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_init(&bo->lock);
	bo->base.funcs = &ivpu_gem_funcs;
	bo->flags = flags;
	bo->ops = ops;
	bo->user_ptr = user_ptr;

	if (ops->type == IVPU_BO_TYPE_SHMEM)
		ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
	else
		drm_gem_private_object_init(&vdev->drm, &bo->base, size);

	if (ret) {
		ivpu_err(vdev, "Failed to initialize drm object\n");
		goto err_free;
	}

	if (flags & DRM_IVPU_BO_MAPPABLE) {
		ret = drm_gem_create_mmap_offset(&bo->base);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate mmap offset\n");
			goto err_release;
		}
	}

	if (mmu_context) {
		ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
		if (ret) {
			ivpu_err(vdev, "Failed to add BO to context: %d\n", ret);
			goto err_release;
		}
	}

	return bo;

err_release:
	drm_gem_object_release(&bo->base);
err_free:
	kfree(bo);
	return ERR_PTR(ret);
}

static void ivpu_bo_free(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	if (bo->ctx)
		ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
			 bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
	else
		ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n",
			 (bool)bo->sgt, bo->mmu_mapped);

	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));

	vunmap(bo->kvaddr);

	if (bo->ctx)
		ivpu_bo_free_vpu_addr(bo);

	if (bo->sgt)
		ivpu_bo_unmap_and_free_pages(bo);

	if (bo->base.import_attach)
		drm_prime_gem_destroy(&bo->base, bo->sgt);

	drm_gem_object_release(&bo->base);

	mutex_destroy(&bo->lock);
	kfree(bo);
}

static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s",
		 bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, bo->ops->name);

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;
		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND);
	vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));

	return 0;
}

static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->sgt)
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);

	mutex_unlock(&bo->lock);

	if (ret)
		return ERR_PTR(ret);

	return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
}

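/*
 * Fault handler for CPU mmaps of non-imported BOs: backing pages are
 * allocated lazily on first access and inserted into the VMA one PFN
 * at a time under bo->lock.
 */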
static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	pgoff_t page_offset;
	struct page *page;
	vm_fault_t ret;
	int err;

	mutex_lock(&bo->lock);

	if (!bo->sgt) {
		err = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (err) {
			ret = vmf_error(err);
			goto unlock;
		}
	}

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	if (page_offset >= npages) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = bo->pages[page_offset];
		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}

static const struct vm_operations_struct ivpu_vm_ops = {
	.fault = ivpu_vm_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs ivpu_gem_funcs = {
	.free = ivpu_bo_free,
	.mmap = ivpu_bo_mmap,
	.vm_ops = &ivpu_vm_ops,
	.get_sg_table = ivpu_bo_get_sg_table,
};

int
ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_bo_create *args = data;
	u64 size = PAGE_ALIGN(args->size);
	struct ivpu_bo *bo;
	int ret;

	if (args->flags & ~DRM_IVPU_BO_FLAGS)
		return -EINVAL;

	if (size == 0)
		return -EINVAL;

	bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
			 bo, file_priv->ctx.id, args->size, args->flags);
		return PTR_ERR(bo);
	}

	ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
	if (!ret) {
		args->vpu_addr = bo->vpu_addr;
		args->handle = bo->handle;
	}

	drm_gem_object_put(&bo->base);

	ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
		 file_priv->ctx.id, bo->vpu_addr, bo->base.size, bo->flags);

	return ret;
}

struct ivpu_bo *
ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
{
	const struct ivpu_addr_range *range;
	struct ivpu_addr_range fixed_range;
	struct ivpu_bo *bo;
	pgprot_t prot;
	int ret;

	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));

	if (vpu_addr) {
		fixed_range.start = vpu_addr;
		fixed_range.end = vpu_addr + size;
		range = &fixed_range;
	} else {
		range = &vdev->hw->ranges.global;
	}

	bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
			 bo, vpu_addr, size, flags);
		return NULL;
	}

	ret = ivpu_bo_pin(bo);
	if (ret)
		goto err_put;

	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);

	prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
	bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
	if (!bo->kvaddr) {
		ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
		goto err_put;
	}

	ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
		 bo->vpu_addr, bo->base.size, flags);

	return bo;

err_put:
	drm_gem_object_put(&bo->base);
	return NULL;
}

void ivpu_bo_free_internal(struct ivpu_bo *bo)
{
	drm_gem_object_put(&bo->base);
}

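/*
 * PRIME import: attach to the dma-buf and wrap it in an ivpu_bo. The backing
 * pages remain owned by the exporting driver; prime_ops only maps and unmaps
 * the dma-buf attachment.
 */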
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct dma_buf_attachment *attach;
	struct ivpu_bo *bo;

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(buf);

	bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
		goto err_detach;
	}

	lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);

	bo->base.import_attach = attach;

	return &bo->base;

err_detach:
	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
	return ERR_CAST(bo);
}

int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct drm_ivpu_bo_info *args = data;
	struct drm_gem_object *obj;
	struct ivpu_bo *bo;
	int ret = 0;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	bo = to_ivpu_bo(obj);

	mutex_lock(&bo->lock);

	if (!bo->ctx) {
		ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret);
			goto unlock;
		}
	}

	args->flags = bo->flags;
	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
	args->vpu_addr = bo->vpu_addr;
	args->size = obj->size;
unlock:
	mutex_unlock(&bo->lock);
	drm_gem_object_put(obj);
	return ret;
}

int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_bo_wait *args = data;
	struct drm_gem_object *obj;
	unsigned long timeout;
	long ret;

	timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -EINVAL;

	ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout);
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret > 0) {
		ret = 0;
		args->job_status = to_ivpu_bo(obj)->job_status;
	}

	drm_gem_object_put(obj);

	return ret;
}

static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
	unsigned long dma_refcount = 0;

	if (bo->base.dma_buf && bo->base.dma_buf->file)
		dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);

	drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
		   bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size,
		   kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
}

void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;
	struct ivpu_bo *bo;

	drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
		   "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");

	mutex_lock(&vdev->gctx.lock);
	list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
		ivpu_bo_print_info(bo, p);
	mutex_unlock(&vdev->gctx.lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
		file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
		if (!file_priv)
			continue;

		mutex_lock(&file_priv->ctx.lock);
		list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
			ivpu_bo_print_info(bo, p);
		mutex_unlock(&file_priv->ctx.lock);

		ivpu_file_priv_put(&file_priv);
	}
}

void ivpu_bo_list_print(struct drm_device *dev)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	ivpu_bo_list(dev, &p);
}