// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo_driver.h>

#include "i915_deps.h"
#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"

#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_migrate.h"

/**
 * DOC: Selftest failure modes for failsafe migration:
 *
 * For fail_gpu_migration, the gpu blit scheduled is always a clear blit
 * rather than a copy blit, and then we force the failure paths as if
 * the blit fence returned an error.
 *
 * For fail_work_allocation we fail the kmalloc of the async worker, we
 * sync the gpu blit. If it then fails, or fail_gpu_migration is set to
 * true, then a memcpy operation is performed sync.
 */
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
/* Fault-injection knobs, written only via the setter below. */
static bool fail_gpu_migration;
static bool fail_work_allocation;

/**
 * i915_ttm_migrate_set_failure_modes - Selftest hook to inject migration
 * failures.
 * @gpu_migration: Whether to simulate a failed GPU blit.
 * @work_allocation: Whether to simulate a failed async-worker allocation.
 */
void i915_ttm_migrate_set_failure_modes(bool gpu_migration,
					bool work_allocation)
{
	fail_gpu_migration = gpu_migration;
	fail_work_allocation = work_allocation;
}
#endif

/*
 * Derive the GEM cache level for a TTM resource: only a cached (ttm_cached)
 * resource that does not GTT-bind lmem, on a platform with LLC or snooping,
 * is reported as I915_CACHE_LLC; everything else is I915_CACHE_NONE.
 */
static enum i915_cache_level
i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
		     struct ttm_tt *ttm)
{
	return ((HAS_LLC(i915) || HAS_SNOOP(i915)) &&
		!i915_ttm_gtt_binds_lmem(res) &&
		ttm->caching == ttm_cached) ? I915_CACHE_LLC :
		I915_CACHE_NONE;
}

/*
 * Map a TTM memory type to the matching intel_memory_region:
 * I915_PL_SYSTEM maps to the system region (instance 0); anything at or
 * above I915_PL_LMEM0 maps to the local-memory region of the same index.
 */
static struct intel_memory_region *
i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
{
	struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);

	/* There's some room for optimization here... */
	GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
		   ttm_mem_type < I915_PL_LMEM0);
	if (ttm_mem_type == I915_PL_SYSTEM)
		return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
						  0);

	return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
					  ttm_mem_type - I915_PL_LMEM0);
}

/**
 * i915_ttm_adjust_domains_after_move - Adjust the GEM domains after a
 * TTM move
 * @obj: The gem object
 *
 * Sets both read and write domains to WC when the new resource is
 * CPU-mapped as iomem or its ttm pages are not cached; otherwise both
 * domains are set to CPU.
 */
void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
		obj->write_domain = I915_GEM_DOMAIN_WC;
		obj->read_domains = I915_GEM_DOMAIN_WC;
	} else {
		obj->write_domain = I915_GEM_DOMAIN_CPU;
		obj->read_domains = I915_GEM_DOMAIN_CPU;
	}
}

/**
 * i915_ttm_adjust_gem_after_move - Adjust the GEM state after a TTM move
 * @obj: The gem object
 *
 * Adjusts the GEM object's region, mem_flags and cache coherency after a
 * TTM move.
 */
void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	unsigned int cache_level;
	unsigned int i;

	/*
	 * If object was moved to an allowable region, update the object
	 * region to consider it migrated. Note that if it's currently not
	 * in an allowable region, it's evicted and we don't update the
	 * object region.
	 */
	if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
		for (i = 0; i < obj->mm.n_placements; ++i) {
			struct intel_memory_region *mr = obj->mm.placements[i];

			if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
			    mr != obj->mm.region) {
				i915_gem_object_release_memory_region(obj);
				i915_gem_object_init_memory_region(obj, mr);
				break;
			}
		}
	}

	/* Refresh the CPU-mappability flags from the new resource. */
	obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);

	obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
		I915_BO_FLAG_STRUCT_PAGE;

	cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
					   bo->ttm);
	i915_gem_object_set_cache_coherency(obj, cache_level);
}

/**
 * i915_ttm_move_notify - Prepare an object for move
 * @bo: The ttm buffer object.
 *
 * This function prepares an object for move by removing all GPU bindings,
 * removing all CPU mapings and finally releasing the pages sg-table.
 *
 * Return: 0 if successful, negative error code on error.
 */
int i915_ttm_move_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret;

	/*
	 * Note: The async unbinding here will actually transform the
	 * blocking wait for unbind into a wait before finally submitting
	 * evict / migration blit and thus stall the migration timeline
	 * which may not be good for overall throughput. We should make
	 * sure we await the unbind fences *after* the migration blit
	 * instead of *before* as we currently do.
	 */
	ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE |
				     I915_GEM_OBJECT_UNBIND_ASYNC);
	if (ret)
		return ret;

	ret = __i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	return 0;
}

/*
 * i915_ttm_accel_move - Schedule a GPU blit (clear or copy) for a TTM move.
 *
 * Schedules either a clear of @dst_st or a copy from @bo's current
 * resource into it, using the GT migration context, after awaiting @deps.
 *
 * Return: The fence of the scheduled request, or an ERR_PTR. If submission
 * fails after a request was created, the request is waited on and released
 * before the error is returned.
 */
static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
					     bool clear,
					     struct ttm_resource *dst_mem,
					     struct ttm_tt *dst_ttm,
					     struct sg_table *dst_st,
					     const struct i915_deps *deps)
{
	struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
						     bdev);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct i915_request *rq;
	struct ttm_tt *src_ttm = bo->ttm;
	enum i915_cache_level src_level, dst_level;
	int ret;

	/* No migration context, or a wedged GT: the blitter can't be used. */
	if (!to_gt(i915)->migrate.context || intel_gt_is_wedged(to_gt(i915)))
		return ERR_PTR(-EINVAL);

	/* With fail_gpu_migration, we always perform a GPU clear. */
	if (I915_SELFTEST_ONLY(fail_gpu_migration))
		clear = true;

	dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
	if (clear) {
		/* Clearing a kernel bo here is only allowed under fault injection. */
		if (bo->type == ttm_bo_type_kernel &&
		    !I915_SELFTEST_ONLY(fail_gpu_migration))
			return ERR_PTR(-EINVAL);

		intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
		ret = intel_context_migrate_clear(to_gt(i915)->migrate.context, deps,
						  dst_st->sgl, dst_level,
						  i915_ttm_gtt_binds_lmem(dst_mem),
						  0, &rq);
	} else {
		struct i915_refct_sgt *src_rsgt =
			i915_ttm_resource_get_st(obj, bo->resource);

		if (IS_ERR(src_rsgt))
			return ERR_CAST(src_rsgt);

		src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
		intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
		ret = intel_context_migrate_copy(to_gt(i915)->migrate.context,
						 deps, src_rsgt->table.sgl,
						 src_level,
						 i915_ttm_gtt_binds_lmem(bo->resource),
						 dst_st->sgl, dst_level,
						 i915_ttm_gtt_binds_lmem(dst_mem),
						 &rq);

		i915_refct_sgt_put(src_rsgt);
	}

	intel_engine_pm_put(to_gt(i915)->migrate.context->engine);

	/*
	 * On submission failure, wait out and drop any request that was
	 * created. NOTE(review): this relies on intel_context_migrate_clear()/
	 * _copy() writing @rq even on error — confirm against intel_migrate.c.
	 */
	if (ret && rq) {
		i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(rq);
	}

	return ret ? ERR_PTR(ret) : &rq->fence;
}

/**
 * struct i915_ttm_memcpy_arg - argument for the bo memcpy functionality.
 * @_dst_iter: Storage space for the destination kmap iterator.
 * @_src_iter: Storage space for the source kmap iterator.
 * @dst_iter: Pointer to the destination kmap iterator.
 * @src_iter: Pointer to the source kmap iterator.
 * @num_pages: Number of pages to copy or clear.
 * @clear: Whether to clear instead of copy.
 * @src_rsgt: Refcounted scatter-gather list of source memory.
 * @dst_rsgt: Refcounted scatter-gather list of destination memory.
 */
struct i915_ttm_memcpy_arg {
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_iomap io;
	} _dst_iter,
	_src_iter;
	struct ttm_kmap_iter *dst_iter;
	struct ttm_kmap_iter *src_iter;
	unsigned long num_pages;
	bool clear;
	struct i915_refct_sgt *src_rsgt;
	struct i915_refct_sgt *dst_rsgt;
};

/**
 * struct i915_ttm_memcpy_work - Async memcpy worker under a dma-fence.
 * @fence: The dma-fence.
 * @work: The work struct use for the memcpy work.
 * @lock: The fence lock. Not used to protect anything else ATM.
 * @irq_work: Low latency worker to signal the fence since it can't be done
 * from the callback for lockdep reasons.
 * @cb: Callback for the accelerated migration fence.
 * @arg: The argument for the memcpy functionality.
 */
struct i915_ttm_memcpy_work {
	struct dma_fence fence;
	struct work_struct work;
	/* The fence lock */
	spinlock_t lock;
	struct irq_work irq_work;
	struct dma_fence_cb cb;
	struct i915_ttm_memcpy_arg arg;
};

/* Perform the CPU copy (or clear) described by @arg. */
static void i915_ttm_move_memcpy(struct i915_ttm_memcpy_arg *arg)
{
	ttm_move_memcpy(arg->clear, arg->num_pages,
			arg->dst_iter, arg->src_iter);
}

/*
 * Set up the kmap iterators and refcounted sg-tables needed for a CPU
 * copy or clear from @bo's current resource to @dst_mem. Resources that
 * are CPU-mapped as iomem get an iomap iterator, others a ttm_tt
 * iterator. For a clear there is no source, so @src_rsgt stays NULL.
 * The references taken here are dropped by i915_ttm_memcpy_release().
 */
static void i915_ttm_memcpy_init(struct i915_ttm_memcpy_arg *arg,
				 struct ttm_buffer_object *bo, bool clear,
				 struct ttm_resource *dst_mem,
				 struct ttm_tt *dst_ttm,
				 struct i915_refct_sgt *dst_rsgt)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct intel_memory_region *dst_reg, *src_reg;

	dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
	src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
	GEM_BUG_ON(!dst_reg || !src_reg);

	arg->dst_iter = !i915_ttm_cpu_maps_iomem(dst_mem) ?
		ttm_kmap_iter_tt_init(&arg->_dst_iter.tt, dst_ttm) :
		ttm_kmap_iter_iomap_init(&arg->_dst_iter.io, &dst_reg->iomap,
					 &dst_rsgt->table, dst_reg->region.start);

	arg->src_iter = !i915_ttm_cpu_maps_iomem(bo->resource) ?
		ttm_kmap_iter_tt_init(&arg->_src_iter.tt, bo->ttm) :
		ttm_kmap_iter_iomap_init(&arg->_src_iter.io, &src_reg->iomap,
					 &obj->ttm.cached_io_rsgt->table,
					 src_reg->region.start);
	arg->clear = clear;
	arg->num_pages = bo->base.size >> PAGE_SHIFT;

	arg->dst_rsgt = i915_refct_sgt_get(dst_rsgt);
	arg->src_rsgt = clear ? NULL :
		i915_ttm_resource_get_st(obj, bo->resource);
}

/* Drop the sg-table references taken by i915_ttm_memcpy_init(). */
static void i915_ttm_memcpy_release(struct i915_ttm_memcpy_arg *arg)
{
	i915_refct_sgt_put(arg->src_rsgt);
	i915_refct_sgt_put(arg->dst_rsgt);
}

/*
 * Process-context worker: perform the fallback CPU copy, then signal and
 * drop the extra reference on the memcpy fence. Queued by __memcpy_cb()
 * when the accelerated move reported an error.
 */
static void __memcpy_work(struct work_struct *work)
{
	struct i915_ttm_memcpy_work *copy_work =
		container_of(work, typeof(*copy_work), work);
	struct i915_ttm_memcpy_arg *arg = &copy_work->arg;
	/* Annotate the copy as part of the fence-signalling critical path. */
	bool cookie = dma_fence_begin_signalling();

	i915_ttm_move_memcpy(arg);
	dma_fence_end_signalling(cookie);

	dma_fence_signal(&copy_work->fence);

	i915_ttm_memcpy_release(arg);
	dma_fence_put(&copy_work->fence);
}

/*
 * irq_work callback: the accelerated move succeeded, so no CPU copy is
 * needed — just signal the memcpy fence and release resources. Deferred
 * to irq_work because, per the struct kdoc above, the fence can't be
 * signalled directly from the dma-fence callback for lockdep reasons.
 */
static void __memcpy_irq_work(struct irq_work *irq_work)
{
	struct i915_ttm_memcpy_work *copy_work =
		container_of(irq_work, typeof(*copy_work), irq_work);
	struct i915_ttm_memcpy_arg *arg = &copy_work->arg;

	dma_fence_signal(&copy_work->fence);
	i915_ttm_memcpy_release(arg);
	dma_fence_put(&copy_work->fence);
}

/*
 * Callback on the accelerated-move fence: on error (or injected GPU
 * migration failure) queue the CPU-copy fallback in process context;
 * otherwise signal the memcpy fence from low-latency irq_work.
 */
static void __memcpy_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_ttm_memcpy_work *copy_work =
		container_of(cb, typeof(*copy_work), cb);

	if (unlikely(fence->error || I915_SELFTEST_ONLY(fail_gpu_migration))) {
		INIT_WORK(&copy_work->work, __memcpy_work);
		queue_work(system_unbound_wq, &copy_work->work);
	} else {
		init_irq_work(&copy_work->irq_work, __memcpy_irq_work);
		irq_work_queue(&copy_work->irq_work);
	}
}

static const char *get_driver_name(struct dma_fence *fence)
{
	return "i915_ttm_memcpy_work";
}

static const char *get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static const struct dma_fence_ops dma_fence_memcpy_ops = {
	.get_driver_name = get_driver_name,
	.get_timeline_name = get_timeline_name,
};

/*
 * Arm the error-intercept worker on the accelerated-move fence @dep.
 * Returns the memcpy fence to wait on, or an ERR_PTR if the callback
 * could not be attached (see body on the next lines).
 */
static struct dma_fence *
i915_ttm_memcpy_work_arm(struct i915_ttm_memcpy_work *work,
			 struct dma_fence *dep)
{
	int ret;

	spin_lock_init(&work->lock);
	dma_fence_init(&work->fence, &dma_fence_memcpy_ops, &work->lock, 0, 0);
	/* Extra reference, dropped by the worker / irq_work after signalling. */
	dma_fence_get(&work->fence);
	ret = dma_fence_add_callback(dep, &work->cb, __memcpy_cb);
	if (ret) {
		/*
		 * -ENOENT means @dep is already signalled; otherwise the
		 * callback couldn't be armed, so sync-wait for the blit
		 * before reading its error status.
		 */
		if (ret != -ENOENT)
			dma_fence_wait(dep, false);

		return ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ? -EINVAL :
			       dep->error);
	}

	return &work->fence;
}

/*
 * __i915_ttm_move - Perform a TTM move or clear, preferring the blitter.
 *
 * When @allow_accel is set, first tries an accelerated (GPU) move. When
 * the destination GTT-binds lmem, an error-intercept worker is armed on
 * the blit fence so that a failed blit falls back to a CPU memcpy. If the
 * worker can't be allocated (or fault injection demands it), the blit is
 * synced and the memcpy performed synchronously.
 *
 * Return: A fence to wait on for async completion, NULL if the move was
 * completed synchronously with a CPU copy, or an ERR_PTR on error.
 */
static struct dma_fence *
__i915_ttm_move(struct ttm_buffer_object *bo,
		const struct ttm_operation_ctx *ctx, bool clear,
		struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm,
		struct i915_refct_sgt *dst_rsgt, bool allow_accel,
		const struct i915_deps *move_deps)
{
	struct i915_ttm_memcpy_work *copy_work = NULL;
	struct i915_ttm_memcpy_arg _arg, *arg = &_arg;
	struct dma_fence *fence = ERR_PTR(-EINVAL);

	if (allow_accel) {
		fence = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm,
					    &dst_rsgt->table, move_deps);

		/*
		 * We only need to intercept the error when moving to lmem.
		 * When moving to system, TTM or shmem will provide us with
		 * cleared pages.
		 */
		if (!IS_ERR(fence) && !i915_ttm_gtt_binds_lmem(dst_mem) &&
		    !I915_SELFTEST_ONLY(fail_gpu_migration ||
					fail_work_allocation))
			goto out;
	}

	/* If we've scheduled gpu migration. Try to arm error intercept. */
	if (!IS_ERR(fence)) {
		struct dma_fence *dep = fence;

		if (!I915_SELFTEST_ONLY(fail_work_allocation))
			copy_work = kzalloc(sizeof(*copy_work), GFP_KERNEL);

		if (copy_work) {
			arg = &copy_work->arg;
			i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
					     dst_rsgt);
			fence = i915_ttm_memcpy_work_arm(copy_work, dep);
		} else {
			/* No worker: sync the blit and adopt its error, if any. */
			dma_fence_wait(dep, false);
			fence = ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ?
					-EINVAL : fence->error);
		}
		dma_fence_put(dep);

		if (!IS_ERR(fence))
			goto out;
	} else {
		int err = PTR_ERR(fence);

		/* Interrupted / transient errors go back to the caller to retry. */
		if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
			return fence;

		/* Sync all dependencies before the CPU fallback copy. */
		if (move_deps) {
			err = i915_deps_sync(move_deps, ctx);
			if (err)
				return ERR_PTR(err);
		}
	}

	/* Error intercept failed or no accelerated migration to start with */
	if (!copy_work)
		i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
				     dst_rsgt);
	i915_ttm_move_memcpy(arg);
	i915_ttm_memcpy_release(arg);
	kfree(copy_work);

	return NULL;
out:
	if (!fence && copy_work) {
		i915_ttm_memcpy_release(arg);
		kfree(copy_work);
	}

	return fence;
}

/**
 * i915_ttm_move - The TTM move callback used by i915.
 * @bo: The buffer object.
 * @evict: Whether this is an eviction.
 * @ctx: The struct ttm_operation_ctx describing how the waits should be
 * performed.
 * @dst_mem: The destination ttm resource.
 * @hop: If we need multihop, what temporary memory type to move to.
 *
 * Return: 0 if successful, negative error code otherwise.
 */
int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
		  struct ttm_operation_ctx *ctx,
		  struct ttm_resource *dst_mem,
		  struct ttm_place *hop)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct dma_fence *migration_fence = NULL;
	struct ttm_tt *ttm = bo->ttm;
	struct i915_refct_sgt *dst_rsgt;
	bool clear;
	int ret;

	/* No backing GEM object: nothing to migrate, just move the bo. */
	if (GEM_WARN_ON(!obj)) {
		ttm_bo_move_null(bo, dst_mem);
		return 0;
	}

	ret = i915_ttm_move_notify(bo);
	if (ret)
		return ret;

	/* Objects no longer needed are purged instead of migrated. */
	if (obj->mm.madv != I915_MADV_WILLNEED) {
		i915_ttm_purge(obj);
		ttm_resource_free(bo, &dst_mem);
		return 0;
	}

	/* Populate ttm with pages if needed. Typically system memory. */
	if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
		ret = ttm_tt_populate(bo->bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_rsgt = i915_ttm_resource_get_st(obj, dst_mem);
	if (IS_ERR(dst_rsgt))
		return PTR_ERR(dst_rsgt);

	/* No populated source pages (and not iomem): this is a clear, not a copy. */
	clear = !i915_ttm_cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
	/*
	 * Skip the move entirely for a clear whose ttm pages don't have
	 * TTM_TT_FLAG_ZERO_ALLOC set. NOTE(review): presumably such pages
	 * are already zeroed at population time — confirm flag semantics.
	 */
	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) {
		struct i915_deps deps;

		i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
		ret = i915_deps_add_resv(&deps, bo->base.resv, ctx);
		if (ret) {
			i915_refct_sgt_put(dst_rsgt);
			return ret;
		}

		migration_fence = __i915_ttm_move(bo, ctx, clear, dst_mem, ttm,
						  dst_rsgt, true, &deps);
		i915_deps_fini(&deps);
	}

	/* We can possibly get an -ERESTARTSYS here */
	if (IS_ERR(migration_fence)) {
		i915_refct_sgt_put(dst_rsgt);
		return PTR_ERR(migration_fence);
	}

	if (migration_fence) {
		ret = ttm_bo_move_accel_cleanup(bo, migration_fence, evict,
						true, dst_mem);
		if (ret) {
			/* Pipelined cleanup failed: finish synchronously. */
			dma_fence_wait(migration_fence, false);
			ttm_bo_move_sync_cleanup(bo, dst_mem);
		}
		dma_fence_put(migration_fence);
	} else {
		ttm_bo_move_sync_cleanup(bo, dst_mem);
	}

	i915_ttm_adjust_domains_after_move(obj);
	i915_ttm_free_cached_io_rsgt(obj);

	/* Cache the iomem sg-table for later io-page lookups into lmem. */
	if (i915_ttm_gtt_binds_lmem(dst_mem) || i915_ttm_cpu_maps_iomem(dst_mem)) {
		obj->ttm.cached_io_rsgt = dst_rsgt;
		obj->ttm.get_io_page.sg_pos = dst_rsgt->table.sgl;
		obj->ttm.get_io_page.sg_idx = 0;
	} else {
		i915_refct_sgt_put(dst_rsgt);
	}

	i915_ttm_adjust_lru(obj);
	i915_ttm_adjust_gem_after_move(obj);
	return 0;
}

/**
 * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
 * another
 * @dst: The destination object
 * @src: The source object
 * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
 * @intr: Whether to perform waits interruptible.
 *
 * Note: The caller is responsible for assuring that the underlying
 * TTM objects are populated if needed and locked.
 *
 * Return: Zero on success. Negative error code on error. If @intr == true,
 * then it may return -ERESTARTSYS or -EINTR.
 */
int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
			  struct drm_i915_gem_object *src,
			  bool allow_accel, bool intr)
{
	struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
	struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
	struct ttm_operation_ctx ctx = {
		.interruptible = intr,
	};
	struct i915_refct_sgt *dst_rsgt;
	struct dma_fence *copy_fence;
	struct i915_deps deps;
	int ret;

	assert_object_held(dst);
	assert_object_held(src);
	i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);

	/* Make room for the copy fence in both reservation objects. */
	ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
	if (ret)
		return ret;

	ret = dma_resv_reserve_fences(dst_bo->base.resv, 1);
	if (ret)
		return ret;

	/*
	 * Collect all fences the copy must await.
	 * NOTE(review): the early error returns above and below skip
	 * i915_deps_fini() — verify i915_deps_add_resv() unwinds its own
	 * allocations on failure.
	 */
	ret = i915_deps_add_resv(&deps, dst_bo->base.resv, &ctx);
	if (ret)
		return ret;

	ret = i915_deps_add_resv(&deps, src_bo->base.resv, &ctx);
	if (ret)
		return ret;

	/*
	 * NOTE(review): dst_rsgt is not IS_ERR-checked here, unlike in
	 * i915_ttm_move() — confirm i915_ttm_resource_get_st() cannot fail
	 * for an already-populated destination.
	 */
	dst_rsgt = i915_ttm_resource_get_st(dst, dst_bo->resource);
	copy_fence = __i915_ttm_move(src_bo, &ctx, false, dst_bo->resource,
				     dst_bo->ttm, dst_rsgt, allow_accel,
				     &deps);

	i915_deps_fini(&deps);
	i915_refct_sgt_put(dst_rsgt);
	if (IS_ERR_OR_NULL(copy_fence))
		return PTR_ERR_OR_ZERO(copy_fence);

	/* Publish the copy fence: a write on dst, a read on src. */
	dma_resv_add_fence(dst_bo->base.resv, copy_fence, DMA_RESV_USAGE_WRITE);
	dma_resv_add_fence(src_bo->base.resv, copy_fence, DMA_RESV_USAGE_READ);
	dma_fence_put(copy_fence);

	return 0;
}