/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <linux/dma-fence-array.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_tiling.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
#include "i915_vma_resource.h"

static inline void assert_vma_held_evict(const struct i915_vma *vma)
{
	/*
	 * We may be forced to unbind when the vm is dead, to clean it up.
	 * This is the only exception to the requirement of the object lock
	 * being held.
	 */
	if (kref_read(&vma->vm->ref))
		assert_object_held_shared(vma->obj);
}

static struct kmem_cache *slab_vmas;

static struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
}

static void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	char buf[512];

	if (!vma->node.stack) {
		drm_dbg(&to_i915(vma->obj->base.dev)->drm,
			"vma.node [%08llx + %08llx] %s: unknown owner\n",
			vma->node.start, vma->node.size, reason);
		return;
	}

	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
	drm_dbg(&to_i915(vma->obj->base.dev)->drm,
		"vma.node [%08llx + %08llx] %s: inserted at %s\n",
		vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

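/*
 * Create a new VMA for @obj in @vm (optionally for a non-default GTT @view).
 * The new VMA is inserted into the object's VMA rb-tree and list under
 * obj->vma.lock; if another thread raced us and already inserted a matching
 * VMA, that existing instance is returned instead and ours is freed.
 */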
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_gtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int err;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);
	INIT_LIST_HEAD(&vma->obj_link);
	RB_CLEAR_NODE(&vma->obj_node);

	if (view && view->type != I915_GTT_VIEW_NORMAL) {
		vma->gtt_view = *view;
		if (view->type == I915_GTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	err = mutex_lock_interruptible(&vm->mutex);
	if (err) {
		pos = ERR_PTR(err);
		goto err_vma;
	}

	vma->vm = vm;
	list_add_tail(&vma->vm_link, &vm->unbound_list);

	spin_lock(&obj->vma.lock);
	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);
	mutex_unlock(&vm->mutex);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
	list_del_init(&vma->vm_link);
	mutex_unlock(&vm->mutex);
err_vma:
	i915_vma_free(vma);
	return pos;
}

static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_gtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_gtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
	GEM_BUG_ON(!kref_read(&vm->ref));

	spin_lock(&obj->vma.lock);
	vma = i915_vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

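/*
 * Illustrative usage sketch (not a callsite in this file): a typical caller
 * looks up the VMA and then pins it into the address space before use, e.g.
 *
 *	vma = i915_vma_instance(obj, vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *
 * Real callers normally go through helpers such as
 * i915_gem_object_ggtt_pin_ww(), which handle the ww locking dance around
 * i915_vma_pin_ww() below.
 */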
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma_resource *vma_res;
	struct drm_i915_gem_object *obj;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static void __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma_resource *vma_res = vw->vma_res;

	/*
	 * We are about to bind the object, which must mean we have already
	 * signaled the work to potentially clear/move the pages underneath. If
	 * something went wrong at that stage then the object should have
	 * unknown_state set, in which case we need to skip the bind.
	 */
	if (i915_gem_object_has_unknown_state(vw->obj))
		return;

	vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
			       vma_res, vw->cache_level, vw->flags);
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->obj)
		i915_gem_object_put(vw->obj);

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	if (vw->vma_res)
		i915_vma_resource_put(vw->vma_res);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

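/*
 * Wait for any exclusive (bind) fence tracked on the vma's active timeline,
 * i.e. block until an asynchronous bind queued via i915_vma_bind() has
 * actually completed.
 */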
int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, true);
			dma_fence_put(fence);
		}
	}

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
{
	struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
	int err;

	if (!fence)
		return 0;

	if (dma_fence_is_signaled(fence))
		err = fence->error;
	else
		err = -EBUSY;

	dma_fence_put(fence);

	return err;
}
#else
#define i915_vma_verify_bind_complete(_vma) 0
#endif

I915_SELFTEST_EXPORT void
i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
				struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
			       obj->mm.rsgt, i915_gem_object_is_readonly(obj),
			       i915_gem_object_is_lmem(obj), obj->mm.region,
			       vma->ops, vma->private, __i915_vma_offset(vma),
			       __i915_vma_size(vma), vma->size);
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 * @vma_res: pointer to a preallocated vma resource. The resource is either
 * consumed or freed.
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work,
		  struct i915_vma_resource *vma_res)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > i915_vma_size(vma));

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total))) {
		i915_vma_resource_free(vma_res);
		return -ENODEV;
	}

	if (GEM_DEBUG_WARN_ON(!flags)) {
		i915_vma_resource_free(vma_res);
		return -EINVAL;
	}

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0) {
		i915_vma_resource_free(vma_res);
		return 0;
	}

	GEM_BUG_ON(!atomic_read(&vma->pages_count));

	/* Wait for or await async unbinds touching our range */
	if (work && bind_flags & vma->vm->bind_async_flags)
		ret = i915_vma_resource_bind_dep_await(vma->vm,
						       &work->base.chain,
						       vma->node.start,
						       vma->node.size,
						       true,
						       GFP_NOWAIT |
						       __GFP_RETRY_MAYFAIL |
						       __GFP_NOWARN);
	else
		ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
						      vma->node.size, true);
	if (ret) {
		i915_vma_resource_free(vma_res);
		return ret;
	}

	if (vma->resource || !vma_res) {
		/* Rebinding with an additional I915_VMA_*_BIND */
		GEM_WARN_ON(!vma_flags);
		i915_vma_resource_free(vma_res);
	} else {
		i915_vma_resource_init_from_vma(vma_res, vma);
		vma->resource = vma_res;
	}
	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma_res = i915_vma_resource_get(vma->resource);
		work->cache_level = cache_level;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */
		work->obj = i915_gem_object_get(vma->obj);
	} else {
		ret = i915_gem_object_wait_moving_fence(vma->obj, true);
		if (ret) {
			i915_vma_resource_free(vma->resource);
			vma->resource = NULL;

			return ret;
		}
		vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
				   bind_flags);
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}

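/*
 * Map the GGTT-bound vma for CPU access through the aperture (or, for lmem
 * and non-mappable vmas, through an object mapping), returning an __iomem
 * pointer. The mapping is cached in vma->iomap and the vma is pinned and
 * fenced; release with i915_vma_unpin_iomap().
 */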
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
		return IOMEM_ERR_PTR(-EINVAL);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		/*
		 * TODO: consider just using i915_gem_object_pin_map() for lmem
		 * instead, which already supports mapping non-contiguous chunks
		 * of pages, that way we can also drop the
		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
		 */
		if (i915_gem_object_is_lmem(vma->obj)) {
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
		} else if (i915_vma_is_map_and_fenceable(vma)) {
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
						i915_vma_offset(vma),
						i915_vma_size(vma));
		} else {
			ptr = (void __iomem *)
				i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
			if (IS_ERR(ptr)) {
				err = PTR_ERR(ptr);
				goto err;
			}
			ptr = page_pack_bits(ptr, 1);
		}

		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			if (page_unmask_bits(ptr))
				__i915_gem_object_release_map(vma->obj);
			else
				io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return page_mask_bits(ptr);

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IOMEM_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	/* XXX We keep the mapping until __i915_vma_unbind()/evict() */

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (i915_vma_size(vma) < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (i915_vma_size(vma) >= vma->fence_size &&
		     IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));

	mappable = i915_ggtt_offset(vma) + vma->fence_size <=
		   i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		drm_dbg(&to_i915(vma->obj->base.dev)->drm,
			"Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			size, flags & PIN_MAPPABLE ? "mappable" : "total", end);
		return -ENOSPC;
	}

	color = 0;

	if (i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
		    !HAS_64K_PAGES(vma->vm->i915)) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
}

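/*
 * Attempt a quick pin without rebinding: if the vma is already bound with the
 * requested flags, take (or, with PIN_VALIDATE, merely check) the pin without
 * touching vm->mutex. Returns false if the full i915_vma_pin_ww() slow path
 * is required.
 */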
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;

	bound = atomic_read(&vma->flags);

	if (flags & PIN_VALIDATE) {
		flags &= I915_VMA_BIND_MASK;

		return (flags & bound) == flags;
	}

	/* with the lock mandatory for unbind, we don't race here */
	flags &= I915_VMA_BIND_MASK;
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;
}

static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int src_stride, unsigned int dst_stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		unsigned int left;

		src_idx = src_stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= src_stride;
		}

		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}

static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].src_stride,
				  rot_info->plane[i].dst_stride,
				  st, sg);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static struct scatterlist *
add_padding_pages(unsigned int count,
		  struct sg_table *st, struct scatterlist *sg)
{
	st->nents++;

	/*
	 * The DE ignores the PTEs for the padding tiles, the sg entry
	 * here is just a convenience to indicate how many padding PTEs
	 * to insert at this spot.
	 */
	sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
	sg_dma_address(sg) = 0;
	sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
	sg = sg_next(sg);

	return sg;
}

static struct scatterlist *
remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
			      unsigned int offset, unsigned int alignment_pad,
			      unsigned int width, unsigned int height,
			      unsigned int src_stride, unsigned int dst_stride,
			      struct sg_table *st, struct scatterlist *sg,
			      unsigned int *gtt_offset)
{
	unsigned int row;

	if (!width || !height)
		return sg;

	if (alignment_pad)
		sg = add_padding_pages(alignment_pad, st, sg);

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;

		while (left) {
			dma_addr_t addr;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */

			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);

			length = min(left, length);

			st->nents++;

			sg_set_page(sg, NULL, length, 0);
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = length;
			sg = sg_next(sg);

			offset += length / I915_GTT_PAGE_SIZE;
			left -= length;
		}

		offset += src_stride - width;

		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
	}

	*gtt_offset += alignment_pad + dst_stride * height;

	return sg;
}

static struct scatterlist *
remap_contiguous_pages(struct drm_i915_gem_object *obj,
		       unsigned int obj_offset,
		       unsigned int count,
		       struct sg_table *st, struct scatterlist *sg)
{
	struct scatterlist *iter;
	unsigned int offset;

	iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
	GEM_BUG_ON(!iter);

	do {
		unsigned int len;

		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0)
			return sg;

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);
}

static struct scatterlist *
remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
			       unsigned int obj_offset, unsigned int alignment_pad,
			       unsigned int size,
			       struct sg_table *st, struct scatterlist *sg,
			       unsigned int *gtt_offset)
{
	if (!size)
		return sg;

	if (alignment_pad)
		sg = add_padding_pages(alignment_pad, st, sg);

	sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
	sg = sg_next(sg);

	*gtt_offset += alignment_pad + size;

	return sg;
}

static struct scatterlist *
remap_color_plane_pages(const struct intel_remapped_info *rem_info,
			struct drm_i915_gem_object *obj,
			int color_plane,
			struct sg_table *st, struct scatterlist *sg,
			unsigned int *gtt_offset)
{
	unsigned int alignment_pad = 0;

	if (rem_info->plane_alignment)
		alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;

	if (rem_info->plane[color_plane].linear)
		sg = remap_linear_color_plane_pages(obj,
						    rem_info->plane[color_plane].offset,
						    alignment_pad,
						    rem_info->plane[color_plane].size,
						    st, sg,
						    gtt_offset);

	else
		sg = remap_tiled_color_plane_pages(obj,
						   rem_info->plane[color_plane].offset,
						   alignment_pad,
						   rem_info->plane[color_plane].width,
						   rem_info->plane[color_plane].height,
						   rem_info->plane[color_plane].src_stride,
						   rem_info->plane[color_plane].dst_stride,
						   st, sg,
						   gtt_offset);

	return sg;
}

static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
		  struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_remapped_info_size(rem_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int gtt_offset = 0;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
		sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);

	i915_sg_trim(st);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rem_info->plane[0].width,
		rem_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static noinline struct sg_table *
intel_partial_pages(const struct i915_gtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int count = view->partial.size;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;

	sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);

	sg_mark_end(sg);
	i915_sg_trim(st); /* Drop any unused tail entries. */

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}

static int
__i915_vma_get_pages(struct i915_vma *vma)
{
	struct sg_table *pages;

	/*
	 * The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->gtt_view.type) {
	default:
		GEM_BUG_ON(vma->gtt_view.type);
		fallthrough;
	case I915_GTT_VIEW_NORMAL:
		pages = vma->obj->mm.pages;
		break;

	case I915_GTT_VIEW_ROTATED:
		pages =
			intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
		break;

	case I915_GTT_VIEW_REMAPPED:
		pages =
			intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
		break;

	case I915_GTT_VIEW_PARTIAL:
		pages = intel_partial_pages(&vma->gtt_view, vma->obj);
		break;
	}

	if (IS_ERR(pages)) {
		drm_err(&vma->vm->i915->drm,
			"Failed to get pages for VMA view type %u (%ld)!\n",
			vma->gtt_view.type, PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	vma->pages = pages;

	return 0;
}

I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
{
	int err;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	err = i915_gem_object_pin_pages(vma->obj);
	if (err)
		return err;

	err = __i915_vma_get_pages(vma);
	if (err)
		goto err_unpin;

	vma->page_sizes = vma->obj->mm.page_sizes;
	atomic_inc(&vma->pages_count);

	return 0;

err_unpin:
	__i915_gem_object_unpin_pages(vma->obj);

	return err;
}

void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
{
	/*
	 * Before we release the pages that were bound by this vma, we
	 * must invalidate all the TLBs that may still have a reference
	 * back to our physical address. It only needs to be done once,
	 * so after updating the PTE to point away from the pages, record
	 * the most recent TLB invalidation seqno, and if we have not yet
	 * flushed the TLBs upon release, perform a full invalidation.
	 */
	WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);

	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		if (vma->pages != vma->obj->mm.pages) {
			sg_free_table(vma->pages);
			kfree(vma->pages);
		}
		vma->pages = NULL;

		i915_gem_object_unpin_pages(vma->obj);
	}
}

I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	struct dma_fence *moving = NULL;
	struct i915_vma_resource *vma_res = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

	assert_vma_held(vma);
	GEM_BUG_ON(!ww);

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags))
		return 0;

	err = i915_vma_get_pages(vma);
	if (err)
		return err;

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	if (flags & vma->vm->bind_async_flags) {
		/* lock VM */
		err = i915_vm_lock_objects(vma->vm, ww);
		if (err)
			goto err_rpm;

		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_rpm;
		}

		work->vm = vma->vm;

		err = i915_gem_object_get_moving_fence(vma->obj, &moving);
		if (err)
			goto err_rpm;

		dma_fence_work_chain(&work->base, moving);

		/* Allocate enough page directories to cover the PTEs we will use */
		if (vma->vm->allocate_va_range) {
			err = i915_vm_alloc_pt_stash(vma->vm,
						     &work->stash,
						     vma->size);
			if (err)
				goto err_fence;

			err = i915_vm_map_pt_stash(vma->vm, &work->stash);
			if (err)
				goto err_fence;
		}
	}

	vma_res = i915_vma_resource_alloc();
	if (IS_ERR(vma_res)) {
		err = PTR_ERR(vma_res);
		goto err_fence;
	}

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them from within the
	 * mmu_notifier callbacks - but they are allowed to be part of the
	 * user ppGTT which can never be mapped. As such we try to give the
	 * distinct users of the same mutex, distinct lockclasses
	 * [equivalent to how we keep i915_ggtt and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_vma_res;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		if (!(flags & PIN_VALIDATE))
			__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, ww, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj->cache_level,
			    flags, work, vma_res);
	vma_res = NULL;
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	if (!(flags & PIN_VALIDATE)) {
		__i915_vma_pin(vma);
		GEM_BUG_ON(!i915_vma_is_pinned(vma));
	}
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_vma_res:
	i915_vma_resource_free(vma_res);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
err_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);

	if (moving)
		dma_fence_put(moving);

	i915_vma_put_pages(vma);
	return err;
}

static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

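/*
 * Pin @vma into the GGTT, retrying on -ENOSPC: flush idle contexts on every
 * GT using this GGTT and evict the address space before trying again, so we
 * only give up when a genuine error occurs.
 */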
static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
			   u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	struct intel_gt *gt;
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	int err;

	do {
		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);

		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
			flush_idle_contexts(gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			/*
			 * We pass NULL ww here, as we don't want to unbind
			 * locked objects when called from execbuf when pinning
			 * is removed. This would probably regress badly.
			 */
			i915_gem_evict_vm(vm, NULL);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags)
{
	struct i915_gem_ww_ctx _ww;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	if (ww)
		return __i915_ggtt_pin(vma, ww, align, flags);

	lockdep_assert_not_held(&vma->obj->base.resv->lock.base);

	for_i915_gem_ww(&_ww, err, true) {
		err = i915_gem_object_lock(vma->obj, &_ww);
		if (!err)
			err = __i915_ggtt_pin(vma, &_ww, align, flags);
	}

	return err;
}

static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	GEM_BUG_ON(i915_vma_is_closed(vma));
	list_add(&vma->closed_link, &gt->closed_vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	if (i915_vma_is_ggtt(vma))
		return;

	GEM_BUG_ON(!atomic_read(&vma->open_count));
	if (atomic_dec_and_lock_irqsave(&vma->open_count,
					&gt->closed_lock,
					flags)) {
		__vma_close(vma, gt);
		spin_unlock_irqrestore(&gt->closed_lock, flags);
	}
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	list_del_init(&vma->closed_link);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
	spin_unlock_irq(&gt->closed_lock);
}

static void force_unbind(struct i915_vma *vma)
{
	if (!drm_mm_node_allocated(&vma->node))
		return;

	atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
	WARN_ON(__i915_vma_unbind(vma));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
}

static void release_references(struct i915_vma *vma, struct intel_gt *gt,
			       bool vm_ddestroy)
{
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(i915_vma_is_active(vma));

	spin_lock(&obj->vma.lock);
	list_del(&vma->obj_link);
	if (!RB_EMPTY_NODE(&vma->obj_node))
		rb_erase(&vma->obj_node, &obj->vma.tree);

	spin_unlock(&obj->vma.lock);

	spin_lock_irq(&gt->closed_lock);
	__i915_vma_remove_closed(vma);
	spin_unlock_irq(&gt->closed_lock);

	if (vm_ddestroy)
		i915_vm_resv_put(vma->vm);

	i915_active_fini(&vma->active);
	GEM_WARN_ON(vma->resource);
	i915_vma_free(vma);
}

/**
 * i915_vma_destroy_locked - Remove all weak references to the vma and put
 * the initial reference.
 *
 * This function should be called when it's decided the vma isn't needed
 * anymore. The caller must assure that it doesn't race with another lookup
 * plus destroy, typically by taking an appropriate reference.
 *
 * Current callsites are
 * - __i915_gem_object_pages_fini()
 * - __i915_vm_close() - Blocks the above function by taking a reference on
 * the object.
 * - __i915_vma_parked() - Blocks the above functions by taking a reference
 * on the vm and a reference on the object. Also takes the object lock so
 * destruction from __i915_vma_parked() can be blocked by holding the
 * object lock. Since the object lock is only allowed from within i915 with
 * an object refcount, holding the object lock also implicitly blocks the
 * vma freeing from __i915_gem_object_pages_fini().
 *
 * Because of locks taken during destruction, a vma is also guaranteed to
 * stay alive while the following locks are held if it was looked up while
 * holding one of the locks:
 * - vm->mutex
 * - obj->vma.lock
 * - gt->closed_lock
 */
void i915_vma_destroy_locked(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->mutex);

	force_unbind(vma);
	list_del_init(&vma->vm_link);
	release_references(vma, vma->vm->gt, false);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	struct intel_gt *gt;
	bool vm_ddestroy;

	mutex_lock(&vma->vm->mutex);
	force_unbind(vma);
	list_del_init(&vma->vm_link);
	vm_ddestroy = vma->vm_ddestroy;
	vma->vm_ddestroy = false;

	/* vma->vm may be freed when releasing vma->vm->mutex. */
	gt = vma->vm->gt;
	mutex_unlock(&vma->vm->mutex);
	release_references(vma, gt, vm_ddestroy);
}

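/*
 * Called when the GT is parked (idle): walk gt->closed_vma and destroy every
 * closed VMA whose object and vm we can still take a reference on, taking the
 * object lock where possible and otherwise putting the vma back on the
 * closed list for a later attempt.
 */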
void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryget(vm)) {
			i915_gem_object_put(obj);
			continue;
		}

		list_move(&vma->closed_link, &closed);
	}
	spin_unlock_irq(&gt->closed_lock);

	/* As the GT is held idle, no vma can be reopened as we destroy them */
	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		if (i915_gem_object_trylock(obj, NULL)) {
			INIT_LIST_HEAD(&vma->closed_link);
			i915_vma_destroy(vma);
			i915_gem_object_unlock(obj);
		} else {
			/* back you go.. */
			spin_lock_irq(&gt->closed_lock);
			list_add(&vma->closed_link, &gt->closed_vma);
			spin_unlock_irq(&gt->closed_lock);
		}

		i915_gem_object_put(obj);
		i915_vm_put(vm);
	}
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	if (page_unmask_bits(vma->iomap))
		__i915_gem_object_release_map(vma->obj);
	else
		io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{
	return __i915_request_await_exclusive(rq, &vma->active);
}

static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	/* Wait for the vma to be bound before we start! */
	err = __i915_request_await_bind(rq, vma);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}

int _i915_vma_move_to_active(struct i915_vma *vma,
			     struct i915_request *rq,
			     struct dma_fence *fence,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	GEM_BUG_ON(!vma->pages);

	if (!(flags & __EXEC_OBJECT_NO_REQUEST_AWAIT)) {
		err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE);
		if (unlikely(err))
			return err;
	}
	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	/*
	 * Reserve fences slot early to prevent an allocation after preparing
	 * the workload and associating fences with dma_resv.
	 */
	if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
		struct dma_fence *curr;
		int idx;

		dma_fence_array_for_each(curr, idx, fence)
			;
		err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
		if (unlikely(err))
			return err;
	}

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}
	}

	if (fence) {
		struct dma_fence *curr;
		enum dma_resv_usage usage;
		int idx;

		if (flags & EXEC_OBJECT_WRITE) {
			usage = DMA_RESV_USAGE_WRITE;
			obj->write_domain = I915_GEM_DOMAIN_RENDER;
			obj->read_domains = 0;
		} else {
			usage = DMA_RESV_USAGE_READ;
			obj->write_domain = 0;
		}

		dma_fence_array_for_each(curr, idx, fence)
			dma_resv_add_fence(vma->obj->base.resv, curr, usage);
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}

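/*
 * Tear down the vma's binding: revoke CPU mmaps and the fence register for
 * map-and-fenceable GGTT vmas, unmap any iomap, then detach the vma and start
 * (async) or wait for (sync) the PTE teardown via the vma_resource unbind
 * fence, which is returned to the caller when @async is true.
 */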
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
{
	struct i915_vma_resource *vma_res = vma->resource;
	struct dma_fence *unbind_fence;

	GEM_BUG_ON(i915_vma_is_pinned(vma));
	assert_vma_held_evict(vma);

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind, otherwise due to the non-strict nature
		 * of those indirect writes they may end up referencing the
		 * GGTT PTE after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);

		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}

	__i915_vma_iounmap(vma);

	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	/* Object backend must be async capable. */
	GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);

	/* If vm is not open, unbind is a nop. */
	vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
		kref_read(&vma->vm->ref);
	vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
		vma->vm->skip_pte_rewrite;
	trace_i915_vma_unbind(vma);

	if (async)
		unbind_fence = i915_vma_resource_unbind(vma_res,
							&vma->obj->mm.tlb);
	else
		unbind_fence = i915_vma_resource_unbind(vma_res, NULL);

	vma->resource = NULL;

	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);

	if (!async) {
		if (unbind_fence) {
			dma_fence_wait(unbind_fence, false);
			dma_fence_put(unbind_fence);
			unbind_fence = NULL;
		}
		vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
	}

	/*
	 * Binding itself may not have completed until the unbind fence signals,
	 * so don't drop the pages until that happens, unless the resource is
	 * async_capable.
	 */

	vma_unbind_pages(vma);
	return unbind_fence;
}

int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	assert_vma_held_evict(vma);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	__i915_vma_evict(vma, false);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}

static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
{
	struct dma_fence *fence;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return NULL;

	if (i915_vma_is_pinned(vma) ||
	    &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
		return ERR_PTR(-EAGAIN);

	/*
	 * We probably need to replace this with awaiting the fences of the
	 * object's dma_resv when the vma active goes away. When doing that
	 * we need to be careful to not add the vma_resource unbind fence
	 * immediately to the object's dma_resv, because then unbinding
	 * the next vma from the object, in case there are many, will
	 * actually await the unbinding of the previous vmas, which is
	 * undesirable.
	 */
	if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
				       I915_ACTIVE_AWAIT_EXCL |
				       I915_ACTIVE_AWAIT_ACTIVE) < 0) {
		return ERR_PTR(-EBUSY);
	}

	fence = __i915_vma_evict(vma, true);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */

	return fence;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	assert_object_held_shared(vma->obj);

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

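/*
 * Unbind without blocking on the unbind itself: the vma_resource unbind fence
 * is added to the object's dma_resv so the PTE teardown completes
 * asynchronously. The object lock (dma_resv) must be held since the fence is
 * installed there; returns -EBUSY rather than sleeping when the vm mutex or
 * fence slots cannot be acquired.
 */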
2131 */ 2132 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) 2133 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); 2134 2135 if (trylock_vm && !mutex_trylock(&vm->mutex)) { 2136 err = -EBUSY; 2137 goto out_rpm; 2138 } else if (!trylock_vm) { 2139 err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref); 2140 if (err) 2141 goto out_rpm; 2142 } 2143 2144 fence = __i915_vma_unbind_async(vma); 2145 mutex_unlock(&vm->mutex); 2146 if (IS_ERR_OR_NULL(fence)) { 2147 err = PTR_ERR_OR_ZERO(fence); 2148 goto out_rpm; 2149 } 2150 2151 dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ); 2152 dma_fence_put(fence); 2153 2154 out_rpm: 2155 if (wakeref) 2156 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); 2157 return err; 2158 } 2159 2160 int i915_vma_unbind_unlocked(struct i915_vma *vma) 2161 { 2162 int err; 2163 2164 i915_gem_object_lock(vma->obj, NULL); 2165 err = i915_vma_unbind(vma); 2166 i915_gem_object_unlock(vma->obj); 2167 2168 return err; 2169 } 2170 2171 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma) 2172 { 2173 i915_gem_object_make_unshrinkable(vma->obj); 2174 return vma; 2175 } 2176 2177 void i915_vma_make_shrinkable(struct i915_vma *vma) 2178 { 2179 i915_gem_object_make_shrinkable(vma->obj); 2180 } 2181 2182 void i915_vma_make_purgeable(struct i915_vma *vma) 2183 { 2184 i915_gem_object_make_purgeable(vma->obj); 2185 } 2186 2187 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 2188 #include "selftests/i915_vma.c" 2189 #endif 2190 2191 void i915_vma_module_exit(void) 2192 { 2193 kmem_cache_destroy(slab_vmas); 2194 } 2195 2196 int __init i915_vma_module_init(void) 2197 { 2198 slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); 2199 if (!slab_vmas) 2200 return -ENOMEM; 2201 2202 return 0; 2203 } 2204