1 // SPDX-License-Identifier: GPL-2.0+ 2 /* Copyright (C) 2014-2018 Broadcom */ 3 4 #include <drm/drmP.h> 5 #include <drm/drm_syncobj.h> 6 #include <linux/module.h> 7 #include <linux/platform_device.h> 8 #include <linux/pm_runtime.h> 9 #include <linux/device.h> 10 #include <linux/io.h> 11 #include <linux/sched/signal.h> 12 13 #include "uapi/drm/v3d_drm.h" 14 #include "v3d_drv.h" 15 #include "v3d_regs.h" 16 #include "v3d_trace.h" 17 18 static void 19 v3d_init_core(struct v3d_dev *v3d, int core) 20 { 21 /* Set OVRTMUOUT, which means that the texture sampler uniform 22 * configuration's tmu output type field is used, instead of 23 * using the hardware default behavior based on the texture 24 * type. If you want the default behavior, you can still put 25 * "2" in the indirect texture state's output_type field. 26 */ 27 V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT); 28 29 /* Whenever we flush the L2T cache, we always want to flush 30 * the whole thing. 31 */ 32 V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0); 33 V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0); 34 } 35 36 /* Sets invariant state for the HW. */ 37 static void 38 v3d_init_hw_state(struct v3d_dev *v3d) 39 { 40 v3d_init_core(v3d, 0); 41 } 42 43 static void 44 v3d_idle_axi(struct v3d_dev *v3d, int core) 45 { 46 V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ); 47 48 if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) & 49 (V3D_GMP_STATUS_RD_COUNT_MASK | 50 V3D_GMP_STATUS_WR_COUNT_MASK | 51 V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) { 52 DRM_ERROR("Failed to wait for safe GMP shutdown\n"); 53 } 54 } 55 56 static void 57 v3d_idle_gca(struct v3d_dev *v3d) 58 { 59 if (v3d->ver >= 41) 60 return; 61 62 V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN); 63 64 if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) & 65 V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) == 66 V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) { 67 DRM_ERROR("Failed to wait for safe GCA shutdown\n"); 68 } 69 } 70 71 static void 72 v3d_reset_v3d(struct v3d_dev *v3d) 73 { 74 int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION); 75 76 if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) { 77 V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 78 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT); 79 V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0); 80 81 /* GFXH-1383: The SW_INIT may cause a stray write to address 0 82 * of the unit, so reset it to its power-on value here. 83 */ 84 V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK); 85 } else { 86 WARN_ON_ONCE(V3D_GET_FIELD(version, 87 V3D_TOP_GR_BRIDGE_MAJOR) != 7); 88 V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 89 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT); 90 V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0); 91 } 92 93 v3d_init_hw_state(v3d); 94 } 95 96 void 97 v3d_reset(struct v3d_dev *v3d) 98 { 99 struct drm_device *dev = &v3d->drm; 100 101 DRM_ERROR("Resetting GPU.\n"); 102 trace_v3d_reset_begin(dev); 103 104 /* XXX: only needed for safe powerdown, not reset. */ 105 if (false) 106 v3d_idle_axi(v3d, 0); 107 108 v3d_idle_gca(v3d); 109 v3d_reset_v3d(v3d); 110 111 v3d_mmu_set_page_table(v3d); 112 v3d_irq_reset(v3d); 113 114 trace_v3d_reset_end(dev); 115 } 116 117 static void 118 v3d_flush_l3(struct v3d_dev *v3d) 119 { 120 if (v3d->ver < 41) { 121 u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL); 122 123 V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, 124 gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH); 125 126 if (v3d->ver < 33) { 127 V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, 128 gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH); 129 } 130 } 131 } 132 133 /* Invalidates the (read-only) L2 cache. */ 134 static void 135 v3d_invalidate_l2(struct v3d_dev *v3d, int core) 136 { 137 V3D_CORE_WRITE(core, V3D_CTL_L2CACTL, 138 V3D_L2CACTL_L2CCLR | 139 V3D_L2CACTL_L2CENA); 140 } 141 142 static void 143 v3d_invalidate_l1td(struct v3d_dev *v3d, int core) 144 { 145 V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF); 146 if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & 147 V3D_L2TCACTL_L2TFLS), 100)) { 148 DRM_ERROR("Timeout waiting for L1T write combiner flush\n"); 149 } 150 } 151 152 /* Invalidates texture L2 cachelines */ 153 static void 154 v3d_flush_l2t(struct v3d_dev *v3d, int core) 155 { 156 v3d_invalidate_l1td(v3d, core); 157 158 V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, 159 V3D_L2TCACTL_L2TFLS | 160 V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM)); 161 if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & 162 V3D_L2TCACTL_L2TFLS), 100)) { 163 DRM_ERROR("Timeout waiting for L2T flush\n"); 164 } 165 } 166 167 /* Invalidates the slice caches. These are read-only caches. */ 168 static void 169 v3d_invalidate_slices(struct v3d_dev *v3d, int core) 170 { 171 V3D_CORE_WRITE(core, V3D_CTL_SLCACTL, 172 V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) | 173 V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) | 174 V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) | 175 V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC)); 176 } 177 178 /* Invalidates texture L2 cachelines */ 179 static void 180 v3d_invalidate_l2t(struct v3d_dev *v3d, int core) 181 { 182 V3D_CORE_WRITE(core, 183 V3D_CTL_L2TCACTL, 184 V3D_L2TCACTL_L2TFLS | 185 V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAR, V3D_L2TCACTL_FLM)); 186 if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & 187 V3D_L2TCACTL_L2TFLS), 100)) { 188 DRM_ERROR("Timeout waiting for L2T invalidate\n"); 189 } 190 } 191 192 void 193 v3d_invalidate_caches(struct v3d_dev *v3d) 194 { 195 v3d_flush_l3(v3d); 196 197 v3d_invalidate_l2(v3d, 0); 198 v3d_invalidate_slices(v3d, 0); 199 v3d_flush_l2t(v3d, 0); 200 } 201 202 void 203 v3d_flush_caches(struct v3d_dev *v3d) 204 { 205 v3d_invalidate_l1td(v3d, 0); 206 v3d_invalidate_l2t(v3d, 0); 207 } 208 209 static void 210 v3d_attach_object_fences(struct v3d_bo **bos, int bo_count, 211 struct dma_fence *fence) 212 { 213 int i; 214 215 for (i = 0; i < bo_count; i++) { 216 /* XXX: Use shared fences for read-only objects. */ 217 reservation_object_add_excl_fence(bos[i]->resv, fence); 218 } 219 } 220 221 static void 222 v3d_unlock_bo_reservations(struct v3d_bo **bos, 223 int bo_count, 224 struct ww_acquire_ctx *acquire_ctx) 225 { 226 int i; 227 228 for (i = 0; i < bo_count; i++) 229 ww_mutex_unlock(&bos[i]->resv->lock); 230 231 ww_acquire_fini(acquire_ctx); 232 } 233 234 /* Takes the reservation lock on all the BOs being referenced, so that 235 * at queue submit time we can update the reservations. 236 * 237 * We don't lock the RCL the tile alloc/state BOs, or overflow memory 238 * (all of which are on exec->unref_list). They're entirely private 239 * to v3d, so we don't attach dma-buf fences to them. 240 */ 241 static int 242 v3d_lock_bo_reservations(struct v3d_bo **bos, 243 int bo_count, 244 struct ww_acquire_ctx *acquire_ctx) 245 { 246 int contended_lock = -1; 247 int i, ret; 248 249 ww_acquire_init(acquire_ctx, &reservation_ww_class); 250 251 retry: 252 if (contended_lock != -1) { 253 struct v3d_bo *bo = bos[contended_lock]; 254 255 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, 256 acquire_ctx); 257 if (ret) { 258 ww_acquire_done(acquire_ctx); 259 return ret; 260 } 261 } 262 263 for (i = 0; i < bo_count; i++) { 264 if (i == contended_lock) 265 continue; 266 267 ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock, 268 acquire_ctx); 269 if (ret) { 270 int j; 271 272 for (j = 0; j < i; j++) 273 ww_mutex_unlock(&bos[j]->resv->lock); 274 275 if (contended_lock != -1 && contended_lock >= i) { 276 struct v3d_bo *bo = bos[contended_lock]; 277 278 ww_mutex_unlock(&bo->resv->lock); 279 } 280 281 if (ret == -EDEADLK) { 282 contended_lock = i; 283 goto retry; 284 } 285 286 ww_acquire_done(acquire_ctx); 287 return ret; 288 } 289 } 290 291 ww_acquire_done(acquire_ctx); 292 293 /* Reserve space for our shared (read-only) fence references, 294 * before we commit the CL to the hardware. 295 */ 296 for (i = 0; i < bo_count; i++) { 297 ret = reservation_object_reserve_shared(bos[i]->resv, 1); 298 if (ret) { 299 v3d_unlock_bo_reservations(bos, bo_count, 300 acquire_ctx); 301 return ret; 302 } 303 } 304 305 return 0; 306 } 307 308 /** 309 * v3d_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects 310 * referenced by the job. 311 * @dev: DRM device 312 * @file_priv: DRM file for this fd 313 * @exec: V3D job being set up 314 * 315 * The command validator needs to reference BOs by their index within 316 * the submitted job's BO list. This does the validation of the job's 317 * BO list and reference counting for the lifetime of the job. 318 * 319 * Note that this function doesn't need to unreference the BOs on 320 * failure, because that will happen at v3d_exec_cleanup() time. 321 */ 322 static int 323 v3d_cl_lookup_bos(struct drm_device *dev, 324 struct drm_file *file_priv, 325 struct drm_v3d_submit_cl *args, 326 struct v3d_exec_info *exec) 327 { 328 u32 *handles; 329 int ret = 0; 330 int i; 331 332 exec->bo_count = args->bo_handle_count; 333 334 if (!exec->bo_count) { 335 /* See comment on bo_index for why we have to check 336 * this. 337 */ 338 DRM_DEBUG("Rendering requires BOs\n"); 339 return -EINVAL; 340 } 341 342 exec->bo = kvmalloc_array(exec->bo_count, 343 sizeof(struct drm_gem_cma_object *), 344 GFP_KERNEL | __GFP_ZERO); 345 if (!exec->bo) { 346 DRM_DEBUG("Failed to allocate validated BO pointers\n"); 347 return -ENOMEM; 348 } 349 350 handles = kvmalloc_array(exec->bo_count, sizeof(u32), GFP_KERNEL); 351 if (!handles) { 352 ret = -ENOMEM; 353 DRM_DEBUG("Failed to allocate incoming GEM handles\n"); 354 goto fail; 355 } 356 357 if (copy_from_user(handles, 358 (void __user *)(uintptr_t)args->bo_handles, 359 exec->bo_count * sizeof(u32))) { 360 ret = -EFAULT; 361 DRM_DEBUG("Failed to copy in GEM handles\n"); 362 goto fail; 363 } 364 365 spin_lock(&file_priv->table_lock); 366 for (i = 0; i < exec->bo_count; i++) { 367 struct drm_gem_object *bo = idr_find(&file_priv->object_idr, 368 handles[i]); 369 if (!bo) { 370 DRM_DEBUG("Failed to look up GEM BO %d: %d\n", 371 i, handles[i]); 372 ret = -ENOENT; 373 spin_unlock(&file_priv->table_lock); 374 goto fail; 375 } 376 drm_gem_object_get(bo); 377 exec->bo[i] = to_v3d_bo(bo); 378 } 379 spin_unlock(&file_priv->table_lock); 380 381 fail: 382 kvfree(handles); 383 return ret; 384 } 385 386 static void 387 v3d_exec_cleanup(struct kref *ref) 388 { 389 struct v3d_exec_info *exec = container_of(ref, struct v3d_exec_info, 390 refcount); 391 struct v3d_dev *v3d = exec->v3d; 392 unsigned int i; 393 struct v3d_bo *bo, *save; 394 395 dma_fence_put(exec->bin.in_fence); 396 dma_fence_put(exec->render.in_fence); 397 398 dma_fence_put(exec->bin.done_fence); 399 dma_fence_put(exec->render.done_fence); 400 401 dma_fence_put(exec->bin_done_fence); 402 dma_fence_put(exec->render_done_fence); 403 404 for (i = 0; i < exec->bo_count; i++) 405 drm_gem_object_put_unlocked(&exec->bo[i]->base); 406 kvfree(exec->bo); 407 408 list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) { 409 drm_gem_object_put_unlocked(&bo->base); 410 } 411 412 pm_runtime_mark_last_busy(v3d->dev); 413 pm_runtime_put_autosuspend(v3d->dev); 414 415 kfree(exec); 416 } 417 418 void v3d_exec_put(struct v3d_exec_info *exec) 419 { 420 kref_put(&exec->refcount, v3d_exec_cleanup); 421 } 422 423 static void 424 v3d_tfu_job_cleanup(struct kref *ref) 425 { 426 struct v3d_tfu_job *job = container_of(ref, struct v3d_tfu_job, 427 refcount); 428 struct v3d_dev *v3d = job->v3d; 429 unsigned int i; 430 431 dma_fence_put(job->in_fence); 432 dma_fence_put(job->done_fence); 433 434 for (i = 0; i < ARRAY_SIZE(job->bo); i++) { 435 if (job->bo[i]) 436 drm_gem_object_put_unlocked(&job->bo[i]->base); 437 } 438 439 pm_runtime_mark_last_busy(v3d->dev); 440 pm_runtime_put_autosuspend(v3d->dev); 441 442 kfree(job); 443 } 444 445 void v3d_tfu_job_put(struct v3d_tfu_job *job) 446 { 447 kref_put(&job->refcount, v3d_tfu_job_cleanup); 448 } 449 450 int 451 v3d_wait_bo_ioctl(struct drm_device *dev, void *data, 452 struct drm_file *file_priv) 453 { 454 int ret; 455 struct drm_v3d_wait_bo *args = data; 456 struct drm_gem_object *gem_obj; 457 struct v3d_bo *bo; 458 ktime_t start = ktime_get(); 459 u64 delta_ns; 460 unsigned long timeout_jiffies = 461 nsecs_to_jiffies_timeout(args->timeout_ns); 462 463 if (args->pad != 0) 464 return -EINVAL; 465 466 gem_obj = drm_gem_object_lookup(file_priv, args->handle); 467 if (!gem_obj) { 468 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); 469 return -EINVAL; 470 } 471 bo = to_v3d_bo(gem_obj); 472 473 ret = reservation_object_wait_timeout_rcu(bo->resv, 474 true, true, 475 timeout_jiffies); 476 477 if (ret == 0) 478 ret = -ETIME; 479 else if (ret > 0) 480 ret = 0; 481 482 /* Decrement the user's timeout, in case we got interrupted 483 * such that the ioctl will be restarted. 484 */ 485 delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start)); 486 if (delta_ns < args->timeout_ns) 487 args->timeout_ns -= delta_ns; 488 else 489 args->timeout_ns = 0; 490 491 /* Asked to wait beyond the jiffie/scheduler precision? */ 492 if (ret == -ETIME && args->timeout_ns) 493 ret = -EAGAIN; 494 495 drm_gem_object_put_unlocked(gem_obj); 496 497 return ret; 498 } 499 500 /** 501 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D. 502 * @dev: DRM device 503 * @data: ioctl argument 504 * @file_priv: DRM file for this fd 505 * 506 * This is the main entrypoint for userspace to submit a 3D frame to 507 * the GPU. Userspace provides the binner command list (if 508 * applicable), and the kernel sets up the render command list to draw 509 * to the framebuffer described in the ioctl, using the command lists 510 * that the 3D engine's binner will produce. 511 */ 512 int 513 v3d_submit_cl_ioctl(struct drm_device *dev, void *data, 514 struct drm_file *file_priv) 515 { 516 struct v3d_dev *v3d = to_v3d_dev(dev); 517 struct v3d_file_priv *v3d_priv = file_priv->driver_priv; 518 struct drm_v3d_submit_cl *args = data; 519 struct v3d_exec_info *exec; 520 struct ww_acquire_ctx acquire_ctx; 521 struct drm_syncobj *sync_out; 522 int ret = 0; 523 524 trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end); 525 526 if (args->pad != 0) { 527 DRM_INFO("pad must be zero: %d\n", args->pad); 528 return -EINVAL; 529 } 530 531 exec = kcalloc(1, sizeof(*exec), GFP_KERNEL); 532 if (!exec) 533 return -ENOMEM; 534 535 ret = pm_runtime_get_sync(v3d->dev); 536 if (ret < 0) { 537 kfree(exec); 538 return ret; 539 } 540 541 kref_init(&exec->refcount); 542 543 ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl, 544 0, 0, &exec->bin.in_fence); 545 if (ret == -EINVAL) 546 goto fail; 547 548 ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl, 549 0, 0, &exec->render.in_fence); 550 if (ret == -EINVAL) 551 goto fail; 552 553 exec->qma = args->qma; 554 exec->qms = args->qms; 555 exec->qts = args->qts; 556 exec->bin.exec = exec; 557 exec->bin.start = args->bcl_start; 558 exec->bin.end = args->bcl_end; 559 exec->render.exec = exec; 560 exec->render.start = args->rcl_start; 561 exec->render.end = args->rcl_end; 562 exec->v3d = v3d; 563 INIT_LIST_HEAD(&exec->unref_list); 564 565 ret = v3d_cl_lookup_bos(dev, file_priv, args, exec); 566 if (ret) 567 goto fail; 568 569 ret = v3d_lock_bo_reservations(exec->bo, exec->bo_count, 570 &acquire_ctx); 571 if (ret) 572 goto fail; 573 574 mutex_lock(&v3d->sched_lock); 575 if (exec->bin.start != exec->bin.end) { 576 ret = drm_sched_job_init(&exec->bin.base, 577 &v3d_priv->sched_entity[V3D_BIN], 578 v3d_priv); 579 if (ret) 580 goto fail_unreserve; 581 582 exec->bin_done_fence = 583 dma_fence_get(&exec->bin.base.s_fence->finished); 584 585 kref_get(&exec->refcount); /* put by scheduler job completion */ 586 drm_sched_entity_push_job(&exec->bin.base, 587 &v3d_priv->sched_entity[V3D_BIN]); 588 } 589 590 ret = drm_sched_job_init(&exec->render.base, 591 &v3d_priv->sched_entity[V3D_RENDER], 592 v3d_priv); 593 if (ret) 594 goto fail_unreserve; 595 596 exec->render_done_fence = 597 dma_fence_get(&exec->render.base.s_fence->finished); 598 599 kref_get(&exec->refcount); /* put by scheduler job completion */ 600 drm_sched_entity_push_job(&exec->render.base, 601 &v3d_priv->sched_entity[V3D_RENDER]); 602 mutex_unlock(&v3d->sched_lock); 603 604 v3d_attach_object_fences(exec->bo, exec->bo_count, 605 exec->render_done_fence); 606 607 v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx); 608 609 /* Update the return sync object for the */ 610 sync_out = drm_syncobj_find(file_priv, args->out_sync); 611 if (sync_out) { 612 drm_syncobj_replace_fence(sync_out, exec->render_done_fence); 613 drm_syncobj_put(sync_out); 614 } 615 616 v3d_exec_put(exec); 617 618 return 0; 619 620 fail_unreserve: 621 mutex_unlock(&v3d->sched_lock); 622 v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx); 623 fail: 624 v3d_exec_put(exec); 625 626 return ret; 627 } 628 629 /** 630 * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D. 631 * @dev: DRM device 632 * @data: ioctl argument 633 * @file_priv: DRM file for this fd 634 * 635 * Userspace provides the register setup for the TFU, which we don't 636 * need to validate since the TFU is behind the MMU. 637 */ 638 int 639 v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, 640 struct drm_file *file_priv) 641 { 642 struct v3d_dev *v3d = to_v3d_dev(dev); 643 struct v3d_file_priv *v3d_priv = file_priv->driver_priv; 644 struct drm_v3d_submit_tfu *args = data; 645 struct v3d_tfu_job *job; 646 struct ww_acquire_ctx acquire_ctx; 647 struct drm_syncobj *sync_out; 648 struct dma_fence *sched_done_fence; 649 int ret = 0; 650 int bo_count; 651 652 trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia); 653 654 job = kcalloc(1, sizeof(*job), GFP_KERNEL); 655 if (!job) 656 return -ENOMEM; 657 658 ret = pm_runtime_get_sync(v3d->dev); 659 if (ret < 0) { 660 kfree(job); 661 return ret; 662 } 663 664 kref_init(&job->refcount); 665 666 ret = drm_syncobj_find_fence(file_priv, args->in_sync, 667 0, 0, &job->in_fence); 668 if (ret == -EINVAL) 669 goto fail; 670 671 job->args = *args; 672 job->v3d = v3d; 673 674 spin_lock(&file_priv->table_lock); 675 for (bo_count = 0; bo_count < ARRAY_SIZE(job->bo); bo_count++) { 676 struct drm_gem_object *bo; 677 678 if (!args->bo_handles[bo_count]) 679 break; 680 681 bo = idr_find(&file_priv->object_idr, 682 args->bo_handles[bo_count]); 683 if (!bo) { 684 DRM_DEBUG("Failed to look up GEM BO %d: %d\n", 685 bo_count, args->bo_handles[bo_count]); 686 ret = -ENOENT; 687 spin_unlock(&file_priv->table_lock); 688 goto fail; 689 } 690 drm_gem_object_get(bo); 691 job->bo[bo_count] = to_v3d_bo(bo); 692 } 693 spin_unlock(&file_priv->table_lock); 694 695 ret = v3d_lock_bo_reservations(job->bo, bo_count, &acquire_ctx); 696 if (ret) 697 goto fail; 698 699 mutex_lock(&v3d->sched_lock); 700 ret = drm_sched_job_init(&job->base, 701 &v3d_priv->sched_entity[V3D_TFU], 702 v3d_priv); 703 if (ret) 704 goto fail_unreserve; 705 706 sched_done_fence = dma_fence_get(&job->base.s_fence->finished); 707 708 kref_get(&job->refcount); /* put by scheduler job completion */ 709 drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[V3D_TFU]); 710 mutex_unlock(&v3d->sched_lock); 711 712 v3d_attach_object_fences(job->bo, bo_count, sched_done_fence); 713 714 v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx); 715 716 /* Update the return sync object */ 717 sync_out = drm_syncobj_find(file_priv, args->out_sync); 718 if (sync_out) { 719 drm_syncobj_replace_fence(sync_out, sched_done_fence); 720 drm_syncobj_put(sync_out); 721 } 722 dma_fence_put(sched_done_fence); 723 724 v3d_tfu_job_put(job); 725 726 return 0; 727 728 fail_unreserve: 729 mutex_unlock(&v3d->sched_lock); 730 v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx); 731 fail: 732 v3d_tfu_job_put(job); 733 734 return ret; 735 } 736 737 int 738 v3d_gem_init(struct drm_device *dev) 739 { 740 struct v3d_dev *v3d = to_v3d_dev(dev); 741 u32 pt_size = 4096 * 1024; 742 int ret, i; 743 744 for (i = 0; i < V3D_MAX_QUEUES; i++) 745 v3d->queue[i].fence_context = dma_fence_context_alloc(1); 746 747 spin_lock_init(&v3d->mm_lock); 748 spin_lock_init(&v3d->job_lock); 749 mutex_init(&v3d->bo_lock); 750 mutex_init(&v3d->reset_lock); 751 mutex_init(&v3d->sched_lock); 752 753 /* Note: We don't allocate address 0. Various bits of HW 754 * treat 0 as special, such as the occlusion query counters 755 * where 0 means "disabled". 756 */ 757 drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1); 758 759 v3d->pt = dma_alloc_wc(v3d->dev, pt_size, 760 &v3d->pt_paddr, 761 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); 762 if (!v3d->pt) { 763 drm_mm_takedown(&v3d->mm); 764 dev_err(v3d->dev, 765 "Failed to allocate page tables. " 766 "Please ensure you have CMA enabled.\n"); 767 return -ENOMEM; 768 } 769 770 v3d_init_hw_state(v3d); 771 v3d_mmu_set_page_table(v3d); 772 773 ret = v3d_sched_init(v3d); 774 if (ret) { 775 drm_mm_takedown(&v3d->mm); 776 dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, 777 v3d->pt_paddr); 778 } 779 780 return 0; 781 } 782 783 void 784 v3d_gem_destroy(struct drm_device *dev) 785 { 786 struct v3d_dev *v3d = to_v3d_dev(dev); 787 788 v3d_sched_fini(v3d); 789 790 /* Waiting for exec to finish would need to be done before 791 * unregistering V3D. 792 */ 793 WARN_ON(v3d->bin_job); 794 WARN_ON(v3d->render_job); 795 796 drm_mm_takedown(&v3d->mm); 797 798 dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr); 799 } 800