// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/set_memory.h>
#include <linux/xarray.h>

#include <drm/drm_cache.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

MODULE_IMPORT_NS(DMA_BUF);

static const struct drm_gem_object_funcs ivpu_gem_funcs;

static struct lock_class_key prime_bo_lock_class_key;

static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
	return 0;
}

static void prime_free_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
}

static int prime_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;

	sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
		return PTR_ERR(sgt);
	}

	bo->sgt = sgt;
	return 0;
}

static void prime_unmap_pages_locked(struct ivpu_bo *bo)
{
	dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops prime_ops = {
	.type = IVPU_BO_TYPE_PRIME,
	.name = "prime",
	.alloc_pages = prime_alloc_pages_locked,
	.free_pages = prime_free_pages_locked,
	.map_pages = prime_map_pages_locked,
	.unmap_pages = prime_unmap_pages_locked,
};

static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
{
	int npages = bo->base.size >> PAGE_SHIFT;
	struct page **pages;

	pages = drm_gem_get_pages(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	if (bo->flags & DRM_IVPU_BO_WC)
		set_pages_array_wc(pages, npages);
	else if (bo->flags & DRM_IVPU_BO_UNCACHED)
		set_pages_array_uc(pages, npages);

	bo->pages = pages;
	return 0;
}

static void shmem_free_pages_locked(struct ivpu_bo *bo)
{
	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);

	drm_gem_put_pages(&bo->base, bo->pages, true, false);
	bo->pages = NULL;
}

static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
{
	int npages = bo->base.size >> PAGE_SHIFT;
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;
	int ret;

	sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to allocate sgtable\n");
		return PTR_ERR(sgt);
	}

	ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
		goto err_free_sgt;
	}

	bo->sgt = sgt;
	return 0;

err_free_sgt:
	kfree(sgt);
	return ret;
}

static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops shmem_ops = {
	.type = IVPU_BO_TYPE_SHMEM,
	.name = "shmem",
	.alloc_pages = shmem_alloc_pages_locked,
	.free_pages = shmem_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};

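/*
 * Internal BOs are allocated by the driver itself (see ivpu_bo_alloc_internal()):
 * their backing pages come directly from alloc_page() instead of shmem.
 */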
static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
	struct page **pages;
	int ret;

	pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto err_free_pages;
		}
		cond_resched();
	}

	bo->pages = pages;
	return 0;

err_free_pages:
	while (i--)
		put_page(pages[i]);
	kvfree(pages);
	return ret;
}

static void internal_free_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;

	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);

	for (i = 0; i < npages; i++)
		put_page(bo->pages[i]);

	kvfree(bo->pages);
	bo->pages = NULL;
}

static const struct ivpu_bo_ops internal_ops = {
	.type = IVPU_BO_TYPE_INTERNAL,
	.name = "internal",
	.alloc_pages = internal_alloc_pages_locked,
	.free_pages = internal_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};

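/*
 * Common helpers dispatching to the per-type ops above. The *_locked variant
 * expects bo->lock to be held by the caller; ivpu_bo_unmap_and_free_pages()
 * takes the lock itself.
 */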
static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	lockdep_assert_held(&bo->lock);
	drm_WARN_ON(&vdev->drm, bo->sgt);

	ret = bo->ops->alloc_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate pages for BO: %d", ret);
		return ret;
	}

	ret = bo->ops->map_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to map pages for BO: %d", ret);
		goto err_free_pages;
	}
	return ret;

err_free_pages:
	bo->ops->free_pages(bo);
	return ret;
}

static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
{
	mutex_lock(&bo->lock);

	WARN_ON(!bo->sgt);
	bo->ops->unmap_pages(bo);
	WARN_ON(bo->sgt);
	bo->ops->free_pages(bo);
	WARN_ON(bo->pages);

	mutex_unlock(&bo->lock);
}

/*
 * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
 *
 * This function pins physical memory pages, then maps the physical pages
 * to IOMMU address space and finally updates the VPU MMU page tables
 * to allow the VPU to translate VPU address to IOMMU address.
 */
int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->vpu_addr) {
		ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n",
			 bo->ctx->id, bo->handle);
		ret = -EINVAL;
		goto unlock;
	}

	if (!bo->sgt) {
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (ret)
			goto unlock;
	}

	if (!bo->mmu_mapped) {
		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
					       ivpu_bo_is_snooped(bo));
		if (ret) {
			ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
			goto unlock;
		}
		bo->mmu_mapped = true;
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}

static int
ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
		       const struct ivpu_addr_range *range)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	if (!range) {
		if (bo->flags & DRM_IVPU_BO_HIGH_MEM)
			range = &vdev->hw->ranges.user_high;
		else
			range = &vdev->hw->ranges.user_low;
	}

	mutex_lock(&ctx->lock);
	ret = ivpu_mmu_context_insert_node_locked(ctx, range, bo->base.size, &bo->mm_node);
	if (!ret) {
		bo->ctx = ctx;
		bo->vpu_addr = bo->mm_node.start;
		list_add_tail(&bo->ctx_node, &ctx->bo_list);
	}
	mutex_unlock(&ctx->lock);

	return ret;
}

static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct ivpu_mmu_context *ctx = bo->ctx;

	ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
		 ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);

	mutex_lock(&bo->lock);

	if (bo->mmu_mapped) {
		drm_WARN_ON(&vdev->drm, !bo->sgt);
		ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
		bo->mmu_mapped = false;
	}

	mutex_lock(&ctx->lock);
	list_del(&bo->ctx_node);
	bo->vpu_addr = 0;
	bo->ctx = NULL;
	ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
	mutex_unlock(&ctx->lock);

	mutex_unlock(&bo->lock);
}

void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx)
{
	struct ivpu_bo *bo, *tmp;

	list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node)
		ivpu_bo_free_vpu_addr(bo);
}

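/*
 * Common constructor for all BO types. When an MMU context is supplied the BO
 * is given a VPU address right away; callers that pass NULL (e.g. prime import)
 * get one later via ivpu_bo_alloc_vpu_addr(), see ivpu_bo_info_ioctl().
 */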
static struct ivpu_bo *
ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context,
	      u64 size, u32 flags, const struct ivpu_bo_ops *ops,
	      const struct ivpu_addr_range *range, u64 user_ptr)
{
	struct ivpu_bo *bo;
	int ret = 0;

	if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size)))
		return ERR_PTR(-EINVAL);

	switch (flags & DRM_IVPU_BO_CACHE_MASK) {
	case DRM_IVPU_BO_CACHED:
	case DRM_IVPU_BO_UNCACHED:
	case DRM_IVPU_BO_WC:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_init(&bo->lock);
	bo->base.funcs = &ivpu_gem_funcs;
	bo->flags = flags;
	bo->ops = ops;
	bo->user_ptr = user_ptr;

	if (ops->type == IVPU_BO_TYPE_SHMEM)
		ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
	else
		drm_gem_private_object_init(&vdev->drm, &bo->base, size);

	if (ret) {
		ivpu_err(vdev, "Failed to initialize drm object\n");
		goto err_free;
	}

	if (flags & DRM_IVPU_BO_MAPPABLE) {
		ret = drm_gem_create_mmap_offset(&bo->base);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate mmap offset\n");
			goto err_release;
		}
	}

	if (mmu_context) {
		ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
		if (ret) {
			ivpu_err(vdev, "Failed to add BO to context: %d\n", ret);
			goto err_release;
		}
	}

	return bo;

err_release:
	drm_gem_object_release(&bo->base);
err_free:
	kfree(bo);
	return ERR_PTR(ret);
}

static void ivpu_bo_free(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	if (bo->ctx)
		ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
			 bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
	else
		ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n",
			 (bool)bo->sgt, bo->mmu_mapped);

	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));

	vunmap(bo->kvaddr);

	if (bo->ctx)
		ivpu_bo_free_vpu_addr(bo);

	if (bo->sgt)
		ivpu_bo_unmap_and_free_pages(bo);

	if (bo->base.import_attach)
		drm_prime_gem_destroy(&bo->base, bo->sgt);

	drm_gem_object_release(&bo->base);

	mutex_destroy(&bo->lock);
	kfree(bo);
}

static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s",
		 bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, bo->ops->name);

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired.*/
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;
		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND);
	vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));

	return 0;
}

static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->sgt)
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);

	mutex_unlock(&bo->lock);

	if (ret)
		return ERR_PTR(ret);

	return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
}

static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	pgoff_t page_offset;
	struct page *page;
	vm_fault_t ret;
	int err;

	mutex_lock(&bo->lock);

	if (!bo->sgt) {
		err = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (err) {
			ret = vmf_error(err);
			goto unlock;
		}
	}

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	if (page_offset >= npages) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = bo->pages[page_offset];
		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}

static const struct vm_operations_struct ivpu_vm_ops = {
	.fault = ivpu_vm_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

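/* GEM callbacks; every ivpu_bo is wired to these via bo->base.funcs in ivpu_bo_alloc(). */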
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
	.free = ivpu_bo_free,
	.mmap = ivpu_bo_mmap,
	.vm_ops = &ivpu_vm_ops,
	.get_sg_table = ivpu_bo_get_sg_table,
};

int
ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_bo_create *args = data;
	u64 size = PAGE_ALIGN(args->size);
	struct ivpu_bo *bo;
	int ret;

	if (args->flags & ~DRM_IVPU_BO_FLAGS)
		return -EINVAL;

	if (size == 0)
		return -EINVAL;

	bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
			 bo, file_priv->ctx.id, args->size, args->flags);
		return PTR_ERR(bo);
	}

	ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
	if (!ret) {
		args->vpu_addr = bo->vpu_addr;
		args->handle = bo->handle;
	}

	drm_gem_object_put(&bo->base);

	ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
		 file_priv->ctx.id, bo->vpu_addr, bo->base.size, bo->flags);

	return ret;
}

struct ivpu_bo *
ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
{
	const struct ivpu_addr_range *range;
	struct ivpu_addr_range fixed_range;
	struct ivpu_bo *bo;
	pgprot_t prot;
	int ret;

	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));

	if (vpu_addr) {
		fixed_range.start = vpu_addr;
		fixed_range.end = vpu_addr + size;
		range = &fixed_range;
	} else {
		range = &vdev->hw->ranges.global_low;
	}

	bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
			 bo, vpu_addr, size, flags);
		return NULL;
	}

	ret = ivpu_bo_pin(bo);
	if (ret)
		goto err_put;

	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);

	if (bo->flags & DRM_IVPU_BO_WC)
		set_pages_array_wc(bo->pages, bo->base.size >> PAGE_SHIFT);
	else if (bo->flags & DRM_IVPU_BO_UNCACHED)
		set_pages_array_uc(bo->pages, bo->base.size >> PAGE_SHIFT);

	prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
	bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
	if (!bo->kvaddr) {
		ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
		goto err_put;
	}

	ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
		 bo->vpu_addr, bo->base.size, flags);

	return bo;

err_put:
	drm_gem_object_put(&bo->base);
	return NULL;
}

void ivpu_bo_free_internal(struct ivpu_bo *bo)
{
	drm_gem_object_put(&bo->base);
}

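/*
 * dma-buf import: the BO is created without an MMU context, so no VPU address
 * is assigned here; ivpu_bo_info_ioctl() binds it to the caller's context on
 * first query.
 */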
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct dma_buf_attachment *attach;
	struct ivpu_bo *bo;

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(buf);

	bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to import BO: %pe (size %lu)",
			 bo, buf->size);
		goto err_detach;
	}

	lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);

	bo->base.import_attach = attach;

	return &bo->base;

err_detach:
	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
	return ERR_CAST(bo);
}

int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct drm_ivpu_bo_info *args = data;
	struct drm_gem_object *obj;
	struct ivpu_bo *bo;
	int ret = 0;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	bo = to_ivpu_bo(obj);

	mutex_lock(&bo->lock);

	if (!bo->ctx) {
		ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret);
			goto unlock;
		}
	}

	args->flags = bo->flags;
	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
	args->vpu_addr = bo->vpu_addr;
	args->size = obj->size;
unlock:
	mutex_unlock(&bo->lock);
	drm_gem_object_put(obj);
	return ret;
}

int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_bo_wait *args = data;
	struct drm_gem_object *obj;
	unsigned long timeout;
	long ret;

	timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -EINVAL;

	ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout);
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret > 0) {
		ret = 0;
		args->job_status = to_ivpu_bo(obj)->job_status;
	}

	drm_gem_object_put(obj);

	return ret;
}

static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
	unsigned long dma_refcount = 0;

	if (bo->base.dma_buf && bo->base.dma_buf->file)
		dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);

	drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
		   bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size,
		   kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
}

void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;
	struct ivpu_bo *bo;

	drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
		   "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");

	mutex_lock(&vdev->gctx.lock);
	list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
		ivpu_bo_print_info(bo, p);
	mutex_unlock(&vdev->gctx.lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
		file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
		if (!file_priv)
			continue;

		mutex_lock(&file_priv->ctx.lock);
		list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
			ivpu_bo_print_info(bo, p);
		mutex_unlock(&file_priv->ctx.lock);

		ivpu_file_priv_put(&file_priv);
	}
}

void ivpu_bo_list_print(struct drm_device *dev)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	ivpu_bo_list(dev, &p);
}