// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_inactive(struct msm_gem_object *msm_obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);

		GEM_WARN_ON(msm_obj->active_count);
		update_inactive(msm_obj);
	}

	return msm_obj->pages;
}
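/*
 * Usage sketch (illustrative only, not called anywhere): callers that need
 * the backing pages go through the pin-counted wrappers further down rather
 * than calling get_pages()/put_pages() directly:
 *
 *	struct page **p = msm_gem_get_pages(obj);	// pins + lazily allocates
 *
 *	if (!IS_ERR(p)) {
 *		... access the pages ...
 *		msm_gem_put_pages(obj);			// drops the pin
 *	}
 */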
static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);

	if (!IS_ERR(p)) {
		msm_obj->pin_count++;
		update_inactive(msm_obj);
	}

	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_inactive(msm_obj);
	msm_gem_unlock(obj);
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
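/*
 * Userspace mapping flow, as a rough sketch (illustrative; the offset query
 * shown is just one way to obtain the fake mmap offset):
 *
 *	offset = <MSM_INFO_GET_OFFSET / dumb map_offset ioctl>;
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, offset);
 *
 * drm_gem_mmap() resolves the fake offset back to the object,
 * msm_gem_mmap_obj() picks vm_page_prot from the MSM_BO_* cache flags, and
 * pages are then inserted lazily by msm_gem_fault() below.
 */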
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}
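/*
 * Each object keeps at most one msm_gem_vma per address space, looked up
 * (or created) under the object lock.  A minimal sketch of the
 * lookup-or-add pattern used by get_iova_locked() below:
 *
 *	vma = lookup_vma(obj, aspace);
 *	if (!vma)
 *		vma = add_vma(obj, aspace);	// followed by msm_gem_init_vma()
 */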
/**
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping. In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (GEM_WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);

	if (!ret)
		msm_obj->pin_count++;

	return ret;
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}

/*
 * Get iova and pin it. Should have a matching put.
 * Limits iova to specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}
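/*
 * Typical GPU-mapping usage, as a sketch (illustrative only): pin the
 * object into an address space, program the returned iova into hardware,
 * and drop the pin once the hardware is done with it:
 *
 *	uint64_t iova;
 *
 *	if (!msm_gem_get_and_pin_iova(obj, aspace, &iova)) {
 *		... hand iova to the GPU/display ...
 *		msm_gem_unpin_iova(obj, aspace);
 *	}
 */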
/* Get iova and pin it. Should have a matching put. */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object.
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	GEM_WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unmap_vma(aspace, vma);

		msm_obj->pin_count--;
		GEM_WARN_ON(msm_obj->pin_count < 0);

		update_inactive(msm_obj);
	}
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}
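/*
 * The two dumb-buffer hooks above implement the generic KMS allocation
 * flow, roughly (illustrative, from the userspace side):
 *
 *	DRM_IOCTL_MODE_CREATE_DUMB  -> msm_gem_dumb_create()
 *	DRM_IOCTL_MODE_MAP_DUMB     -> msm_gem_dumb_map_offset()
 *	mmap(drm_fd, offset)        -> msm_gem_mmap() / msm_gem_fault()
 *
 * Pitch and size are derived from width/height/bpp and page-aligned before
 * the object is created with MSM_BO_SCANOUT | MSM_BO_WC.
 */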
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		update_inactive(msm_obj);
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}
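/*
 * Kernel CPU access, as a sketch (illustrative only; 'data' and 'len' are
 * placeholders): map the object, touch it, then drop the vmap count so the
 * buffer can be vunmapped later:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *
 *	if (!IS_ERR(ptr)) {
 *		memcpy(ptr, data, len);
 *		msm_gem_put_vaddr(obj);
 *	}
 *
 * msm_gem_put_vaddr() only drops vmap_count; the actual vunmap happens
 * lazily via msm_gem_vunmap() (e.g. on purge).
 */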
/* Update madvise status, returns true if not purged, else false. */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;
	update_inactive(msm_obj);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/**
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(is_unevictable(msm_obj));
	GEM_WARN_ON(!msm_obj->evictable);
	GEM_WARN_ON(msm_obj->active_count);

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	update_inactive(msm_obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	GEM_WARN_ON(msm_obj->dontneed);
	GEM_WARN_ON(!msm_obj->sgt);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		if (msm_obj->evictable)
			mark_unevictable(msm_obj);
		list_del(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}

static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));

	if (msm_obj->active_count != 0)
		return;

	mutex_lock(&priv->mm_lock);

	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	if (msm_obj->evictable)
		mark_unevictable(msm_obj);

	list_del(&msm_obj->mm_list);
	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
		mark_evictable(msm_obj);
	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
		mark_purgeable(msm_obj);
	} else {
		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	}

	mutex_unlock(&priv->mm_lock);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (is_active(msm_obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly! Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	mutex_lock(&priv->mm_lock);
	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	/* object should not be on active list: */
	GEM_WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}
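/*
 * Allocation sketch (illustrative only): kernel-internal buffers are
 * created with one of the cache modes validated above, e.g.
 *
 *	struct drm_gem_object *obj = msm_gem_new(dev, size, MSM_BO_WC);
 *
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 * _msm_gem_new() below then decides between shmem backing and the VRAM
 * carveout based on msm_use_mmu() and the MSM_BO_STOLEN/MSM_BO_SCANOUT
 * flags.
 */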
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	if (struct_mutex_locked) {
		drm_gem_object_put_locked(obj);
	} else {
		drm_gem_object_put(obj);
	}
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}
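/*
 * Convenience-helper usage, as a sketch (illustrative only): the
 * _msm_gem_kernel_new() helpers below bundle allocate + pin + vmap for
 * kernel-internal buffers (ringbuffers, etc.), with msm_gem_kernel_put()
 * as the teardown counterpart:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr;
 *
 *	vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	...
 *	msm_gem_kernel_put(bo, aspace, false);
 */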
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);

}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}
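/*
 * Naming sketch (illustrative only; "example" and 'id' are placeholders):
 * the name set here is what shows up in the debugfs object listing above:
 *
 *	msm_gem_object_set_name(obj, "example-%d", id);
 */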