1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2013 Red Hat 4 * Author: Rob Clark <robdclark@gmail.com> 5 */ 6 7 #include <linux/file.h> 8 #include <linux/sync_file.h> 9 #include <linux/uaccess.h> 10 11 #include <drm/drm_drv.h> 12 #include <drm/drm_file.h> 13 #include <drm/drm_syncobj.h> 14 15 #include "msm_drv.h" 16 #include "msm_gpu.h" 17 #include "msm_gem.h" 18 #include "msm_gpu_trace.h" 19 20 /* 21 * Cmdstream submission: 22 */ 23 24 static struct msm_gem_submit *submit_create(struct drm_device *dev, 25 struct msm_gpu *gpu, 26 struct msm_gpu_submitqueue *queue, uint32_t nr_bos, 27 uint32_t nr_cmds) 28 { 29 static atomic_t ident = ATOMIC_INIT(0); 30 struct msm_gem_submit *submit; 31 uint64_t sz; 32 int ret; 33 34 sz = struct_size(submit, bos, nr_bos) + 35 ((u64)nr_cmds * sizeof(submit->cmd[0])); 36 37 if (sz > SIZE_MAX) 38 return ERR_PTR(-ENOMEM); 39 40 submit = kzalloc(sz, GFP_KERNEL); 41 if (!submit) 42 return ERR_PTR(-ENOMEM); 43 44 submit->hw_fence = msm_fence_alloc(); 45 if (IS_ERR(submit->hw_fence)) { 46 ret = PTR_ERR(submit->hw_fence); 47 kfree(submit); 48 return ERR_PTR(ret); 49 } 50 51 ret = drm_sched_job_init(&submit->base, queue->entity, queue); 52 if (ret) { 53 kfree(submit->hw_fence); 54 kfree(submit); 55 return ERR_PTR(ret); 56 } 57 58 kref_init(&submit->ref); 59 submit->dev = dev; 60 submit->aspace = queue->ctx->aspace; 61 submit->gpu = gpu; 62 submit->cmd = (void *)&submit->bos[nr_bos]; 63 submit->queue = queue; 64 submit->pid = get_pid(task_pid(current)); 65 submit->ring = gpu->rb[queue->ring_nr]; 66 submit->fault_dumped = false; 67 68 /* Get a unique identifier for the submission for logging purposes */ 69 submit->ident = atomic_inc_return(&ident) - 1; 70 71 INIT_LIST_HEAD(&submit->node); 72 73 return submit; 74 } 75 76 void __msm_gem_submit_destroy(struct kref *kref) 77 { 78 struct msm_gem_submit *submit = 79 container_of(kref, struct msm_gem_submit, ref); 80 unsigned i; 81 82 if (submit->fence_id) { 83 spin_lock(&submit->queue->idr_lock); 84 idr_remove(&submit->queue->fence_idr, submit->fence_id); 85 spin_unlock(&submit->queue->idr_lock); 86 } 87 88 dma_fence_put(submit->user_fence); 89 dma_fence_put(submit->hw_fence); 90 91 put_pid(submit->pid); 92 msm_submitqueue_put(submit->queue); 93 94 for (i = 0; i < submit->nr_cmds; i++) 95 kfree(submit->cmd[i].relocs); 96 97 kfree(submit); 98 } 99 100 static int submit_lookup_objects(struct msm_gem_submit *submit, 101 struct drm_msm_gem_submit *args, struct drm_file *file) 102 { 103 unsigned i; 104 int ret = 0; 105 106 for (i = 0; i < args->nr_bos; i++) { 107 struct drm_msm_gem_submit_bo submit_bo; 108 void __user *userptr = 109 u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); 110 111 /* make sure we don't have garbage flags, in case we hit 112 * error path before flags is initialized: 113 */ 114 submit->bos[i].flags = 0; 115 116 if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) { 117 ret = -EFAULT; 118 i = 0; 119 goto out; 120 } 121 122 /* at least one of READ and/or WRITE flags should be set: */ 123 #define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE) 124 125 if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) || 126 !(submit_bo.flags & MANDATORY_FLAGS)) { 127 DRM_ERROR("invalid flags: %x\n", submit_bo.flags); 128 ret = -EINVAL; 129 i = 0; 130 goto out; 131 } 132 133 submit->bos[i].handle = submit_bo.handle; 134 submit->bos[i].flags = submit_bo.flags; 135 /* in validate_objects() we figure out if this is true: */ 136 submit->bos[i].iova = submit_bo.presumed; 137 } 138 139 spin_lock(&file->table_lock); 140 141 for (i = 0; i < args->nr_bos; i++) { 142 struct drm_gem_object *obj; 143 144 /* normally use drm_gem_object_lookup(), but for bulk lookup 145 * all under single table_lock just hit object_idr directly: 146 */ 147 obj = idr_find(&file->object_idr, submit->bos[i].handle); 148 if (!obj) { 149 DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i); 150 ret = -EINVAL; 151 goto out_unlock; 152 } 153 154 drm_gem_object_get(obj); 155 156 submit->bos[i].obj = to_msm_bo(obj); 157 } 158 159 out_unlock: 160 spin_unlock(&file->table_lock); 161 162 out: 163 submit->nr_bos = i; 164 165 return ret; 166 } 167 168 static int submit_lookup_cmds(struct msm_gem_submit *submit, 169 struct drm_msm_gem_submit *args, struct drm_file *file) 170 { 171 unsigned i; 172 size_t sz; 173 int ret = 0; 174 175 for (i = 0; i < args->nr_cmds; i++) { 176 struct drm_msm_gem_submit_cmd submit_cmd; 177 void __user *userptr = 178 u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); 179 180 ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd)); 181 if (ret) { 182 ret = -EFAULT; 183 goto out; 184 } 185 186 /* validate input from userspace: */ 187 switch (submit_cmd.type) { 188 case MSM_SUBMIT_CMD_BUF: 189 case MSM_SUBMIT_CMD_IB_TARGET_BUF: 190 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: 191 break; 192 default: 193 DRM_ERROR("invalid type: %08x\n", submit_cmd.type); 194 return -EINVAL; 195 } 196 197 if (submit_cmd.size % 4) { 198 DRM_ERROR("non-aligned cmdstream buffer size: %u\n", 199 submit_cmd.size); 200 ret = -EINVAL; 201 goto out; 202 } 203 204 submit->cmd[i].type = submit_cmd.type; 205 submit->cmd[i].size = submit_cmd.size / 4; 206 submit->cmd[i].offset = submit_cmd.submit_offset / 4; 207 submit->cmd[i].idx = submit_cmd.submit_idx; 208 submit->cmd[i].nr_relocs = submit_cmd.nr_relocs; 209 210 userptr = u64_to_user_ptr(submit_cmd.relocs); 211 212 sz = array_size(submit_cmd.nr_relocs, 213 sizeof(struct drm_msm_gem_submit_reloc)); 214 /* check for overflow: */ 215 if (sz == SIZE_MAX) { 216 ret = -ENOMEM; 217 goto out; 218 } 219 submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL); 220 if (!submit->cmd[i].relocs) { 221 ret = -ENOMEM; 222 goto out; 223 } 224 ret = copy_from_user(submit->cmd[i].relocs, userptr, sz); 225 if (ret) { 226 ret = -EFAULT; 227 goto out; 228 } 229 } 230 231 out: 232 return ret; 233 } 234 235 /* Unwind bo state, according to cleanup_flags. In the success case, only 236 * the lock is dropped at the end of the submit (and active/pin ref is dropped 237 * later when the submit is retired). 238 */ 239 static void submit_cleanup_bo(struct msm_gem_submit *submit, int i, 240 unsigned cleanup_flags) 241 { 242 struct drm_gem_object *obj = &submit->bos[i].obj->base; 243 unsigned flags = submit->bos[i].flags & cleanup_flags; 244 245 /* 246 * Clear flags bit before dropping lock, so that the msm_job_run() 247 * path isn't racing with submit_cleanup() (ie. the read/modify/ 248 * write is protected by the obj lock in all paths) 249 */ 250 submit->bos[i].flags &= ~cleanup_flags; 251 252 if (flags & BO_VMA_PINNED) 253 msm_gem_vma_unpin(submit->bos[i].vma); 254 255 if (flags & BO_OBJ_PINNED) 256 msm_gem_unpin_locked(obj); 257 258 if (flags & BO_LOCKED) 259 dma_resv_unlock(obj->resv); 260 } 261 262 static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i) 263 { 264 unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED | BO_LOCKED; 265 submit_cleanup_bo(submit, i, cleanup_flags); 266 267 if (!(submit->bos[i].flags & BO_VALID)) 268 submit->bos[i].iova = 0; 269 } 270 271 /* This is where we make sure all the bo's are reserved and pin'd: */ 272 static int submit_lock_objects(struct msm_gem_submit *submit) 273 { 274 int contended, slow_locked = -1, i, ret = 0; 275 276 retry: 277 for (i = 0; i < submit->nr_bos; i++) { 278 struct msm_gem_object *msm_obj = submit->bos[i].obj; 279 280 if (slow_locked == i) 281 slow_locked = -1; 282 283 contended = i; 284 285 if (!(submit->bos[i].flags & BO_LOCKED)) { 286 ret = dma_resv_lock_interruptible(msm_obj->base.resv, 287 &submit->ticket); 288 if (ret) 289 goto fail; 290 submit->bos[i].flags |= BO_LOCKED; 291 } 292 } 293 294 ww_acquire_done(&submit->ticket); 295 296 return 0; 297 298 fail: 299 if (ret == -EALREADY) { 300 DRM_ERROR("handle %u at index %u already on submit list\n", 301 submit->bos[i].handle, i); 302 ret = -EINVAL; 303 } 304 305 for (; i >= 0; i--) 306 submit_unlock_unpin_bo(submit, i); 307 308 if (slow_locked > 0) 309 submit_unlock_unpin_bo(submit, slow_locked); 310 311 if (ret == -EDEADLK) { 312 struct msm_gem_object *msm_obj = submit->bos[contended].obj; 313 /* we lost out in a seqno race, lock and retry.. */ 314 ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv, 315 &submit->ticket); 316 if (!ret) { 317 submit->bos[contended].flags |= BO_LOCKED; 318 slow_locked = contended; 319 goto retry; 320 } 321 322 /* Not expecting -EALREADY here, if the bo was already 323 * locked, we should have gotten -EALREADY already from 324 * the dma_resv_lock_interruptable() call. 325 */ 326 WARN_ON_ONCE(ret == -EALREADY); 327 } 328 329 return ret; 330 } 331 332 static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) 333 { 334 int i, ret = 0; 335 336 for (i = 0; i < submit->nr_bos; i++) { 337 struct drm_gem_object *obj = &submit->bos[i].obj->base; 338 bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; 339 340 /* NOTE: _reserve_shared() must happen before 341 * _add_shared_fence(), which makes this a slightly 342 * strange place to call it. OTOH this is a 343 * convenient can-fail point to hook it in. 344 */ 345 ret = dma_resv_reserve_fences(obj->resv, 1); 346 if (ret) 347 return ret; 348 349 /* If userspace has determined that explicit fencing is 350 * used, it can disable implicit sync on the entire 351 * submit: 352 */ 353 if (no_implicit) 354 continue; 355 356 /* Otherwise userspace can ask for implicit sync to be 357 * disabled on specific buffers. This is useful for internal 358 * usermode driver managed buffers, suballocation, etc. 359 */ 360 if (submit->bos[i].flags & MSM_SUBMIT_BO_NO_IMPLICIT) 361 continue; 362 363 ret = drm_sched_job_add_implicit_dependencies(&submit->base, 364 obj, 365 write); 366 if (ret) 367 break; 368 } 369 370 return ret; 371 } 372 373 static int submit_pin_objects(struct msm_gem_submit *submit) 374 { 375 int i, ret = 0; 376 377 submit->valid = true; 378 379 for (i = 0; i < submit->nr_bos; i++) { 380 struct drm_gem_object *obj = &submit->bos[i].obj->base; 381 struct msm_gem_vma *vma; 382 383 /* if locking succeeded, pin bo: */ 384 vma = msm_gem_get_vma_locked(obj, submit->aspace); 385 if (IS_ERR(vma)) { 386 ret = PTR_ERR(vma); 387 break; 388 } 389 390 ret = msm_gem_pin_vma_locked(obj, vma); 391 if (ret) 392 break; 393 394 submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED; 395 submit->bos[i].vma = vma; 396 397 if (vma->iova == submit->bos[i].iova) { 398 submit->bos[i].flags |= BO_VALID; 399 } else { 400 submit->bos[i].iova = vma->iova; 401 /* iova changed, so address in cmdstream is not valid: */ 402 submit->bos[i].flags &= ~BO_VALID; 403 submit->valid = false; 404 } 405 } 406 407 return ret; 408 } 409 410 static void submit_attach_object_fences(struct msm_gem_submit *submit) 411 { 412 int i; 413 414 for (i = 0; i < submit->nr_bos; i++) { 415 struct drm_gem_object *obj = &submit->bos[i].obj->base; 416 417 if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) 418 dma_resv_add_fence(obj->resv, submit->user_fence, 419 DMA_RESV_USAGE_WRITE); 420 else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ) 421 dma_resv_add_fence(obj->resv, submit->user_fence, 422 DMA_RESV_USAGE_READ); 423 } 424 } 425 426 static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, 427 struct msm_gem_object **obj, uint64_t *iova, bool *valid) 428 { 429 if (idx >= submit->nr_bos) { 430 DRM_ERROR("invalid buffer index: %u (out of %u)\n", 431 idx, submit->nr_bos); 432 return -EINVAL; 433 } 434 435 if (obj) 436 *obj = submit->bos[idx].obj; 437 if (iova) 438 *iova = submit->bos[idx].iova; 439 if (valid) 440 *valid = !!(submit->bos[idx].flags & BO_VALID); 441 442 return 0; 443 } 444 445 /* process the reloc's and patch up the cmdstream as needed: */ 446 static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj, 447 uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs) 448 { 449 uint32_t i, last_offset = 0; 450 uint32_t *ptr; 451 int ret = 0; 452 453 if (!nr_relocs) 454 return 0; 455 456 if (offset % 4) { 457 DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset); 458 return -EINVAL; 459 } 460 461 /* For now, just map the entire thing. Eventually we probably 462 * to do it page-by-page, w/ kmap() if not vmap()d.. 463 */ 464 ptr = msm_gem_get_vaddr_locked(&obj->base); 465 466 if (IS_ERR(ptr)) { 467 ret = PTR_ERR(ptr); 468 DBG("failed to map: %d", ret); 469 return ret; 470 } 471 472 for (i = 0; i < nr_relocs; i++) { 473 struct drm_msm_gem_submit_reloc submit_reloc = relocs[i]; 474 uint32_t off; 475 uint64_t iova; 476 bool valid; 477 478 if (submit_reloc.submit_offset % 4) { 479 DRM_ERROR("non-aligned reloc offset: %u\n", 480 submit_reloc.submit_offset); 481 ret = -EINVAL; 482 goto out; 483 } 484 485 /* offset in dwords: */ 486 off = submit_reloc.submit_offset / 4; 487 488 if ((off >= (obj->base.size / 4)) || 489 (off < last_offset)) { 490 DRM_ERROR("invalid offset %u at reloc %u\n", off, i); 491 ret = -EINVAL; 492 goto out; 493 } 494 495 ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid); 496 if (ret) 497 goto out; 498 499 if (valid) 500 continue; 501 502 iova += submit_reloc.reloc_offset; 503 504 if (submit_reloc.shift < 0) 505 iova >>= -submit_reloc.shift; 506 else 507 iova <<= submit_reloc.shift; 508 509 ptr[off] = iova | submit_reloc.or; 510 511 last_offset = off; 512 } 513 514 out: 515 msm_gem_put_vaddr_locked(&obj->base); 516 517 return ret; 518 } 519 520 /* Cleanup submit at end of ioctl. In the error case, this also drops 521 * references, unpins, and drops active refcnt. In the non-error case, 522 * this is done when the submit is retired. 523 */ 524 static void submit_cleanup(struct msm_gem_submit *submit, bool error) 525 { 526 unsigned cleanup_flags = BO_LOCKED; 527 unsigned i; 528 529 if (error) 530 cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED; 531 532 for (i = 0; i < submit->nr_bos; i++) { 533 struct msm_gem_object *msm_obj = submit->bos[i].obj; 534 submit_cleanup_bo(submit, i, cleanup_flags); 535 if (error) 536 drm_gem_object_put(&msm_obj->base); 537 } 538 } 539 540 void msm_submit_retire(struct msm_gem_submit *submit) 541 { 542 int i; 543 544 for (i = 0; i < submit->nr_bos; i++) { 545 struct drm_gem_object *obj = &submit->bos[i].obj->base; 546 547 drm_gem_object_put(obj); 548 } 549 } 550 551 struct msm_submit_post_dep { 552 struct drm_syncobj *syncobj; 553 uint64_t point; 554 struct dma_fence_chain *chain; 555 }; 556 557 static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit, 558 struct drm_file *file, 559 uint64_t in_syncobjs_addr, 560 uint32_t nr_in_syncobjs, 561 size_t syncobj_stride, 562 struct msm_ringbuffer *ring) 563 { 564 struct drm_syncobj **syncobjs = NULL; 565 struct drm_msm_gem_submit_syncobj syncobj_desc = {0}; 566 int ret = 0; 567 uint32_t i, j; 568 569 syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs), 570 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); 571 if (!syncobjs) 572 return ERR_PTR(-ENOMEM); 573 574 for (i = 0; i < nr_in_syncobjs; ++i) { 575 uint64_t address = in_syncobjs_addr + i * syncobj_stride; 576 577 if (copy_from_user(&syncobj_desc, 578 u64_to_user_ptr(address), 579 min(syncobj_stride, sizeof(syncobj_desc)))) { 580 ret = -EFAULT; 581 break; 582 } 583 584 if (syncobj_desc.point && 585 !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) { 586 ret = -EOPNOTSUPP; 587 break; 588 } 589 590 if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) { 591 ret = -EINVAL; 592 break; 593 } 594 595 ret = drm_sched_job_add_syncobj_dependency(&submit->base, file, 596 syncobj_desc.handle, syncobj_desc.point); 597 if (ret) 598 break; 599 600 if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) { 601 syncobjs[i] = 602 drm_syncobj_find(file, syncobj_desc.handle); 603 if (!syncobjs[i]) { 604 ret = -EINVAL; 605 break; 606 } 607 } 608 } 609 610 if (ret) { 611 for (j = 0; j <= i; ++j) { 612 if (syncobjs[j]) 613 drm_syncobj_put(syncobjs[j]); 614 } 615 kfree(syncobjs); 616 return ERR_PTR(ret); 617 } 618 return syncobjs; 619 } 620 621 static void msm_reset_syncobjs(struct drm_syncobj **syncobjs, 622 uint32_t nr_syncobjs) 623 { 624 uint32_t i; 625 626 for (i = 0; syncobjs && i < nr_syncobjs; ++i) { 627 if (syncobjs[i]) 628 drm_syncobj_replace_fence(syncobjs[i], NULL); 629 } 630 } 631 632 static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev, 633 struct drm_file *file, 634 uint64_t syncobjs_addr, 635 uint32_t nr_syncobjs, 636 size_t syncobj_stride) 637 { 638 struct msm_submit_post_dep *post_deps; 639 struct drm_msm_gem_submit_syncobj syncobj_desc = {0}; 640 int ret = 0; 641 uint32_t i, j; 642 643 post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps), 644 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); 645 if (!post_deps) 646 return ERR_PTR(-ENOMEM); 647 648 for (i = 0; i < nr_syncobjs; ++i) { 649 uint64_t address = syncobjs_addr + i * syncobj_stride; 650 651 if (copy_from_user(&syncobj_desc, 652 u64_to_user_ptr(address), 653 min(syncobj_stride, sizeof(syncobj_desc)))) { 654 ret = -EFAULT; 655 break; 656 } 657 658 post_deps[i].point = syncobj_desc.point; 659 660 if (syncobj_desc.flags) { 661 ret = -EINVAL; 662 break; 663 } 664 665 if (syncobj_desc.point) { 666 if (!drm_core_check_feature(dev, 667 DRIVER_SYNCOBJ_TIMELINE)) { 668 ret = -EOPNOTSUPP; 669 break; 670 } 671 672 post_deps[i].chain = dma_fence_chain_alloc(); 673 if (!post_deps[i].chain) { 674 ret = -ENOMEM; 675 break; 676 } 677 } 678 679 post_deps[i].syncobj = 680 drm_syncobj_find(file, syncobj_desc.handle); 681 if (!post_deps[i].syncobj) { 682 ret = -EINVAL; 683 break; 684 } 685 } 686 687 if (ret) { 688 for (j = 0; j <= i; ++j) { 689 dma_fence_chain_free(post_deps[j].chain); 690 if (post_deps[j].syncobj) 691 drm_syncobj_put(post_deps[j].syncobj); 692 } 693 694 kfree(post_deps); 695 return ERR_PTR(ret); 696 } 697 698 return post_deps; 699 } 700 701 static void msm_process_post_deps(struct msm_submit_post_dep *post_deps, 702 uint32_t count, struct dma_fence *fence) 703 { 704 uint32_t i; 705 706 for (i = 0; post_deps && i < count; ++i) { 707 if (post_deps[i].chain) { 708 drm_syncobj_add_point(post_deps[i].syncobj, 709 post_deps[i].chain, 710 fence, post_deps[i].point); 711 post_deps[i].chain = NULL; 712 } else { 713 drm_syncobj_replace_fence(post_deps[i].syncobj, 714 fence); 715 } 716 } 717 } 718 719 int msm_ioctl_gem_submit(struct drm_device *dev, void *data, 720 struct drm_file *file) 721 { 722 struct msm_drm_private *priv = dev->dev_private; 723 struct drm_msm_gem_submit *args = data; 724 struct msm_file_private *ctx = file->driver_priv; 725 struct msm_gem_submit *submit = NULL; 726 struct msm_gpu *gpu = priv->gpu; 727 struct msm_gpu_submitqueue *queue; 728 struct msm_ringbuffer *ring; 729 struct msm_submit_post_dep *post_deps = NULL; 730 struct drm_syncobj **syncobjs_to_reset = NULL; 731 int out_fence_fd = -1; 732 bool has_ww_ticket = false; 733 unsigned i; 734 int ret; 735 736 if (!gpu) 737 return -ENXIO; 738 739 if (args->pad) 740 return -EINVAL; 741 742 if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) { 743 DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n"); 744 return -EPERM; 745 } 746 747 /* for now, we just have 3d pipe.. eventually this would need to 748 * be more clever to dispatch to appropriate gpu module: 749 */ 750 if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0) 751 return -EINVAL; 752 753 if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS) 754 return -EINVAL; 755 756 if (args->flags & MSM_SUBMIT_SUDO) { 757 if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) || 758 !capable(CAP_SYS_RAWIO)) 759 return -EINVAL; 760 } 761 762 queue = msm_submitqueue_get(ctx, args->queueid); 763 if (!queue) 764 return -ENOENT; 765 766 ring = gpu->rb[queue->ring_nr]; 767 768 if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) { 769 out_fence_fd = get_unused_fd_flags(O_CLOEXEC); 770 if (out_fence_fd < 0) { 771 ret = out_fence_fd; 772 goto out_post_unlock; 773 } 774 } 775 776 submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds); 777 if (IS_ERR(submit)) { 778 ret = PTR_ERR(submit); 779 goto out_post_unlock; 780 } 781 782 trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident, 783 args->nr_bos, args->nr_cmds); 784 785 ret = mutex_lock_interruptible(&queue->lock); 786 if (ret) 787 goto out_post_unlock; 788 789 if (args->flags & MSM_SUBMIT_SUDO) 790 submit->in_rb = true; 791 792 if (args->flags & MSM_SUBMIT_FENCE_FD_IN) { 793 struct dma_fence *in_fence; 794 795 in_fence = sync_file_get_fence(args->fence_fd); 796 797 if (!in_fence) { 798 ret = -EINVAL; 799 goto out_unlock; 800 } 801 802 ret = drm_sched_job_add_dependency(&submit->base, in_fence); 803 if (ret) 804 goto out_unlock; 805 } 806 807 if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) { 808 syncobjs_to_reset = msm_parse_deps(submit, file, 809 args->in_syncobjs, 810 args->nr_in_syncobjs, 811 args->syncobj_stride, ring); 812 if (IS_ERR(syncobjs_to_reset)) { 813 ret = PTR_ERR(syncobjs_to_reset); 814 goto out_unlock; 815 } 816 } 817 818 if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) { 819 post_deps = msm_parse_post_deps(dev, file, 820 args->out_syncobjs, 821 args->nr_out_syncobjs, 822 args->syncobj_stride); 823 if (IS_ERR(post_deps)) { 824 ret = PTR_ERR(post_deps); 825 goto out_unlock; 826 } 827 } 828 829 ret = submit_lookup_objects(submit, args, file); 830 if (ret) 831 goto out; 832 833 ret = submit_lookup_cmds(submit, args, file); 834 if (ret) 835 goto out; 836 837 /* copy_*_user while holding a ww ticket upsets lockdep */ 838 ww_acquire_init(&submit->ticket, &reservation_ww_class); 839 has_ww_ticket = true; 840 ret = submit_lock_objects(submit); 841 if (ret) 842 goto out; 843 844 ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT)); 845 if (ret) 846 goto out; 847 848 ret = submit_pin_objects(submit); 849 if (ret) 850 goto out; 851 852 for (i = 0; i < args->nr_cmds; i++) { 853 struct msm_gem_object *msm_obj; 854 uint64_t iova; 855 856 ret = submit_bo(submit, submit->cmd[i].idx, 857 &msm_obj, &iova, NULL); 858 if (ret) 859 goto out; 860 861 if (!submit->cmd[i].size || 862 ((submit->cmd[i].size + submit->cmd[i].offset) > 863 msm_obj->base.size / 4)) { 864 DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4); 865 ret = -EINVAL; 866 goto out; 867 } 868 869 submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4); 870 871 if (submit->valid) 872 continue; 873 874 ret = submit_reloc(submit, msm_obj, submit->cmd[i].offset * 4, 875 submit->cmd[i].nr_relocs, submit->cmd[i].relocs); 876 if (ret) 877 goto out; 878 } 879 880 submit->nr_cmds = i; 881 882 idr_preload(GFP_KERNEL); 883 884 spin_lock(&queue->idr_lock); 885 886 /* 887 * If using userspace provided seqno fence, validate that the id 888 * is available before arming sched job. Since access to fence_idr 889 * is serialized on the queue lock, the slot should be still avail 890 * after the job is armed 891 */ 892 if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) && 893 idr_find(&queue->fence_idr, args->fence)) { 894 spin_unlock(&queue->idr_lock); 895 idr_preload_end(); 896 ret = -EINVAL; 897 goto out; 898 } 899 900 drm_sched_job_arm(&submit->base); 901 902 submit->user_fence = dma_fence_get(&submit->base.s_fence->finished); 903 904 if (args->flags & MSM_SUBMIT_FENCE_SN_IN) { 905 /* 906 * Userspace has assigned the seqno fence that it wants 907 * us to use. It is an error to pick a fence sequence 908 * number that is not available. 909 */ 910 submit->fence_id = args->fence; 911 ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence, 912 &submit->fence_id, submit->fence_id, 913 GFP_NOWAIT); 914 /* 915 * We've already validated that the fence_id slot is valid, 916 * so if idr_alloc_u32 failed, it is a kernel bug 917 */ 918 WARN_ON(ret); 919 } else { 920 /* 921 * Allocate an id which can be used by WAIT_FENCE ioctl to map 922 * back to the underlying fence. 923 */ 924 submit->fence_id = idr_alloc_cyclic(&queue->fence_idr, 925 submit->user_fence, 1, 926 INT_MAX, GFP_NOWAIT); 927 } 928 929 spin_unlock(&queue->idr_lock); 930 idr_preload_end(); 931 932 if (submit->fence_id < 0) { 933 ret = submit->fence_id; 934 submit->fence_id = 0; 935 } 936 937 if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) { 938 struct sync_file *sync_file = sync_file_create(submit->user_fence); 939 if (!sync_file) { 940 ret = -ENOMEM; 941 } else { 942 fd_install(out_fence_fd, sync_file->file); 943 args->fence_fd = out_fence_fd; 944 } 945 } 946 947 submit_attach_object_fences(submit); 948 949 /* The scheduler owns a ref now: */ 950 msm_gem_submit_get(submit); 951 952 drm_sched_entity_push_job(&submit->base); 953 954 args->fence = submit->fence_id; 955 queue->last_fence = submit->fence_id; 956 957 msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs); 958 msm_process_post_deps(post_deps, args->nr_out_syncobjs, 959 submit->user_fence); 960 961 962 out: 963 submit_cleanup(submit, !!ret); 964 if (has_ww_ticket) 965 ww_acquire_fini(&submit->ticket); 966 out_unlock: 967 mutex_unlock(&queue->lock); 968 out_post_unlock: 969 if (ret && (out_fence_fd >= 0)) 970 put_unused_fd(out_fence_fd); 971 972 if (!IS_ERR_OR_NULL(submit)) { 973 msm_gem_submit_put(submit); 974 } else { 975 /* 976 * If the submit hasn't yet taken ownership of the queue 977 * then we need to drop the reference ourself: 978 */ 979 msm_submitqueue_put(queue); 980 } 981 if (!IS_ERR_OR_NULL(post_deps)) { 982 for (i = 0; i < args->nr_out_syncobjs; ++i) { 983 kfree(post_deps[i].chain); 984 drm_syncobj_put(post_deps[i].syncobj); 985 } 986 kfree(post_deps); 987 } 988 989 if (!IS_ERR_OR_NULL(syncobjs_to_reset)) { 990 for (i = 0; i < args->nr_in_syncobjs; ++i) { 991 if (syncobjs_to_reset[i]) 992 drm_syncobj_put(syncobjs_to_reset[i]); 993 } 994 kfree(syncobjs_to_reset); 995 } 996 997 return ret; 998 } 999