/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <linux/dma-fence-array.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_tiling.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
#include "i915_vma_resource.h"

static inline void assert_vma_held_evict(const struct i915_vma *vma)
{
	/*
	 * We may be forced to unbind when the vm is dead, to clean it up.
	 * This is the only exception to the requirement of the object lock
	 * being held.
	 */
	if (kref_read(&vma->vm->ref))
		assert_object_held_shared(vma->obj);
}

static struct kmem_cache *slab_vmas;

static struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
}

static void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_gtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int err;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);
	INIT_LIST_HEAD(&vma->obj_link);
	RB_CLEAR_NODE(&vma->obj_node);

	if (view && view->type != I915_GTT_VIEW_NORMAL) {
		vma->gtt_view = *view;
		if (view->type == I915_GTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	err = mutex_lock_interruptible(&vm->mutex);
	if (err) {
		pos = ERR_PTR(err);
		goto err_vma;
	}

	vma->vm = vm;
	list_add_tail(&vma->vm_link, &vm->unbound_list);

	spin_lock(&obj->vma.lock);
	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);
	mutex_unlock(&vm->mutex);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
	list_del_init(&vma->vm_link);
	mutex_unlock(&vm->mutex);
err_vma:
	i915_vma_free(vma);
	return pos;
}

static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_gtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_gtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
	GEM_BUG_ON(!kref_read(&vm->ref));

	spin_lock(&obj->vma.lock);
	vma = i915_vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma_resource *vma_res;
	struct drm_i915_gem_object *obj;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static void __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma_resource *vma_res = vw->vma_res;

	/*
	 * We are about to bind the object, which must mean we have already
	 * signaled the work to potentially clear/move the pages underneath. If
	 * something went wrong at that stage then the object should have
	 * unknown_state set, in which case we need to skip the bind.
	 */
	if (i915_gem_object_has_unknown_state(vw->obj))
		return;

	vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
			       vma_res, vw->cache_level, vw->flags);
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->obj)
		i915_gem_object_put(vw->obj);

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	if (vw->vma_res)
		i915_vma_resource_put(vw->vma_res);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

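/*
 * i915_vma_work - allocate a dma-fence work item used to bind a vma
 *
 * The work is returned in a disarmed state (dma.error == -EAGAIN) so that
 * committing it is a no-op until the caller arms it; i915_vma_bind() clears
 * the error once the binding has actually been queued.
 */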
struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, true);
			dma_fence_put(fence);
		}
	}

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
{
	struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
	int err;

	if (!fence)
		return 0;

	if (dma_fence_is_signaled(fence))
		err = fence->error;
	else
		err = -EBUSY;

	dma_fence_put(fence);

	return err;
}
#else
#define i915_vma_verify_bind_complete(_vma) 0
#endif

I915_SELFTEST_EXPORT void
i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
				struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
			       obj->mm.rsgt, i915_gem_object_is_readonly(obj),
			       i915_gem_object_is_lmem(obj), obj->mm.region,
			       vma->ops, vma->private, vma->node.start,
			       vma->node.size, vma->size);
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 * @vma_res: pointer to a preallocated vma resource. The resource is either
 * consumed or freed.
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work,
		  struct i915_vma_resource *vma_res)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total))) {
		i915_vma_resource_free(vma_res);
		return -ENODEV;
	}

	if (GEM_DEBUG_WARN_ON(!flags)) {
		i915_vma_resource_free(vma_res);
		return -EINVAL;
	}

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0) {
		i915_vma_resource_free(vma_res);
		return 0;
	}

	GEM_BUG_ON(!atomic_read(&vma->pages_count));

	/* Wait for or await async unbinds touching our range */
	if (work && bind_flags & vma->vm->bind_async_flags)
		ret = i915_vma_resource_bind_dep_await(vma->vm,
						       &work->base.chain,
						       vma->node.start,
						       vma->node.size,
						       true,
						       GFP_NOWAIT |
						       __GFP_RETRY_MAYFAIL |
						       __GFP_NOWARN);
	else
		ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
						      vma->node.size, true);
	if (ret) {
		i915_vma_resource_free(vma_res);
		return ret;
	}

	if (vma->resource || !vma_res) {
		/* Rebinding with an additional I915_VMA_*_BIND */
		GEM_WARN_ON(!vma_flags);
		i915_vma_resource_free(vma_res);
	} else {
		i915_vma_resource_init_from_vma(vma_res, vma);
		vma->resource = vma_res;
	}
	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma_res = i915_vma_resource_get(vma->resource);
		work->cache_level = cache_level;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */
		work->obj = i915_gem_object_get(vma->obj);
	} else {
		ret = i915_gem_object_wait_moving_fence(vma->obj, true);
		if (ret) {
			i915_vma_resource_free(vma->resource);
			vma->resource = NULL;

			return ret;
		}
		vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
				   bind_flags);
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}

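/*
 * i915_vma_pin_iomap - map a GGTT-bound vma for CPU access (through the
 * mappable aperture, an lmem io mapping or a WC pin of the backing store)
 * and take an extra pin plus a fence on the vma.
 *
 * The vma must already be bound in the global GTT. The returned pointer
 * remains valid until i915_vma_unpin_iomap() is called.
 */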
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
		return IOMEM_ERR_PTR(-EINVAL);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		/*
		 * TODO: consider just using i915_gem_object_pin_map() for lmem
		 * instead, which already supports mapping non-contiguous chunks
		 * of pages, that way we can also drop the
		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
		 */
		if (i915_gem_object_is_lmem(vma->obj)) {
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
		} else if (i915_vma_is_map_and_fenceable(vma)) {
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
						vma->node.start,
						vma->node.size);
		} else {
			ptr = (void __iomem *)
				i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
			if (IS_ERR(ptr)) {
				err = PTR_ERR(ptr);
				goto err;
			}
			ptr = page_pack_bits(ptr, 1);
		}

		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			if (page_unmask_bits(ptr))
				__i915_gem_object_release_map(vma->obj);
			else
				io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return page_mask_bits(ptr);

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IOMEM_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

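/*
 * i915_vma_unpin_iomap - drop the pin taken by i915_vma_pin_iomap()
 *
 * Flushes any pending GGTT writes and releases the fence and vma pins.
 * The CPU mapping itself is kept around until the vma is unbound or evicted.
 */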
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	/* XXX We keep the mapping until __i915_vma_unbind()/evict() */

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

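/*
 * i915_vma_misplaced - check whether the vma's current binding no longer
 * satisfies the requested size, alignment and PIN_* placement constraints,
 * in which case the caller needs to unbind and rebind it.
 */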
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @ww: An optional struct i915_gem_ww_ctx
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;

	if (i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
		    !HAS_64K_PAGES(vma->vm->i915)) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;

	bound = atomic_read(&vma->flags);

	if (flags & PIN_VALIDATE) {
		flags &= I915_VMA_BIND_MASK;

		return (flags & bound) == flags;
	}

	/* with the lock mandatory for unbind, we don't race here */
	flags &= I915_VMA_BIND_MASK;
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;
}

static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int src_stride, unsigned int dst_stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		unsigned int left;

		src_idx = src_stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * All we need are the DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= src_stride;
		}

		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}

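/*
 * intel_rotate_pages - build a new sg_table describing the rotated GGTT view
 * of an object, remapping its pages column by column for display scanout.
 */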
static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].src_stride,
				  rot_info->plane[i].dst_stride,
				  st, sg);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static struct scatterlist *
add_padding_pages(unsigned int count,
		  struct sg_table *st, struct scatterlist *sg)
{
	st->nents++;

	/*
	 * The DE ignores the PTEs for the padding tiles, the sg entry
	 * here is just a convenience to indicate how many padding PTEs
	 * to insert at this spot.
	 */
	sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
	sg_dma_address(sg) = 0;
	sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
	sg = sg_next(sg);

	return sg;
}

static struct scatterlist *
remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
			      unsigned int offset, unsigned int alignment_pad,
			      unsigned int width, unsigned int height,
			      unsigned int src_stride, unsigned int dst_stride,
			      struct sg_table *st, struct scatterlist *sg,
			      unsigned int *gtt_offset)
{
	unsigned int row;

	if (!width || !height)
		return sg;

	if (alignment_pad)
		sg = add_padding_pages(alignment_pad, st, sg);

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;

		while (left) {
			dma_addr_t addr;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * All we need are the DMA addresses.
			 */

			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);

			length = min(left, length);

			st->nents++;

			sg_set_page(sg, NULL, length, 0);
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = length;
			sg = sg_next(sg);

			offset += length / I915_GTT_PAGE_SIZE;
			left -= length;
		}

		offset += src_stride - width;

		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
	}

	*gtt_offset += alignment_pad + dst_stride * height;

	return sg;
}

static struct scatterlist *
remap_contiguous_pages(struct drm_i915_gem_object *obj,
		       unsigned int obj_offset,
		       unsigned int count,
		       struct sg_table *st, struct scatterlist *sg)
{
	struct scatterlist *iter;
	unsigned int offset;

	iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
	GEM_BUG_ON(!iter);

	do {
		unsigned int len;

		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0)
			return sg;

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);
}

static struct scatterlist *
remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
			       unsigned int obj_offset, unsigned int alignment_pad,
			       unsigned int size,
			       struct sg_table *st, struct scatterlist *sg,
			       unsigned int *gtt_offset)
{
	if (!size)
		return sg;

	if (alignment_pad)
		sg = add_padding_pages(alignment_pad, st, sg);

	sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
	sg = sg_next(sg);

	*gtt_offset += alignment_pad + size;

	return sg;
}

static struct scatterlist *
remap_color_plane_pages(const struct intel_remapped_info *rem_info,
			struct drm_i915_gem_object *obj,
			int color_plane,
			struct sg_table *st, struct scatterlist *sg,
			unsigned int *gtt_offset)
{
	unsigned int alignment_pad = 0;

	if (rem_info->plane_alignment)
		alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;

	if (rem_info->plane[color_plane].linear)
		sg = remap_linear_color_plane_pages(obj,
						    rem_info->plane[color_plane].offset,
						    alignment_pad,
						    rem_info->plane[color_plane].size,
						    st, sg,
						    gtt_offset);

	else
		sg = remap_tiled_color_plane_pages(obj,
						   rem_info->plane[color_plane].offset,
						   alignment_pad,
						   rem_info->plane[color_plane].width,
						   rem_info->plane[color_plane].height,
						   rem_info->plane[color_plane].src_stride,
						   rem_info->plane[color_plane].dst_stride,
						   st, sg,
						   gtt_offset);

	return sg;
}

static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
		  struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_remapped_info_size(rem_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int gtt_offset = 0;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
		sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);

	i915_sg_trim(st);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rem_info->plane[0].width,
		rem_info->plane[0].height, size);

	return ERR_PTR(ret);
}

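/*
 * intel_partial_pages - build an sg_table covering only the requested range
 * of pages of the object, as used by partial GGTT views (e.g. when only a
 * chunk of a large object is mapped through the aperture).
 */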
static noinline struct sg_table *
intel_partial_pages(const struct i915_gtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int count = view->partial.size;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;

	sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);

	sg_mark_end(sg);
	i915_sg_trim(st); /* Drop any unused tail entries. */

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}

static int
__i915_vma_get_pages(struct i915_vma *vma)
{
	struct sg_table *pages;

	/*
	 * The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->gtt_view.type) {
	default:
		GEM_BUG_ON(vma->gtt_view.type);
		fallthrough;
	case I915_GTT_VIEW_NORMAL:
		pages = vma->obj->mm.pages;
		break;

	case I915_GTT_VIEW_ROTATED:
		pages =
			intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
		break;

	case I915_GTT_VIEW_REMAPPED:
		pages =
			intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
		break;

	case I915_GTT_VIEW_PARTIAL:
		pages = intel_partial_pages(&vma->gtt_view, vma->obj);
		break;
	}

	if (IS_ERR(pages)) {
		drm_err(&vma->vm->i915->drm,
			"Failed to get pages for VMA view type %u (%ld)!\n",
			vma->gtt_view.type, PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	vma->pages = pages;

	return 0;
}

I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
{
	int err;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	err = i915_gem_object_pin_pages(vma->obj);
	if (err)
		return err;

	err = __i915_vma_get_pages(vma);
	if (err)
		goto err_unpin;

	vma->page_sizes = vma->obj->mm.page_sizes;
	atomic_inc(&vma->pages_count);

	return 0;

err_unpin:
	__i915_gem_object_unpin_pages(vma->obj);

	return err;
}

void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
{
	/*
	 * Before we release the pages that were bound by this vma, we
	 * must invalidate all the TLBs that may still have a reference
	 * back to our physical address. It only needs to be done once,
	 * so after updating the PTE to point away from the pages, record
	 * the most recent TLB invalidation seqno, and if we have not yet
	 * flushed the TLBs upon release, perform a full invalidation.
	 */
	WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);

	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		if (vma->pages != vma->obj->mm.pages) {
			sg_free_table(vma->pages);
			kfree(vma->pages);
		}
		vma->pages = NULL;

		i915_gem_object_unpin_pages(vma->obj);
	}
}

I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

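/*
 * i915_vma_pin_ww - pin the vma in its address space under the ww context,
 * inserting and binding it first if it is not already bound with the
 * requested PIN_USER/PIN_GLOBAL flags. Returns 0 on success or a negative
 * error code, e.g. -ENOSPC when no suitable hole could be found.
 */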
int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	struct dma_fence *moving = NULL;
	struct i915_vma_resource *vma_res = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

	assert_vma_held(vma);
	GEM_BUG_ON(!ww);

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags))
		return 0;

	err = i915_vma_get_pages(vma);
	if (err)
		return err;

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	if (flags & vma->vm->bind_async_flags) {
		/* lock VM */
		err = i915_vm_lock_objects(vma->vm, ww);
		if (err)
			goto err_rpm;

		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_rpm;
		}

		work->vm = vma->vm;

		err = i915_gem_object_get_moving_fence(vma->obj, &moving);
		if (err)
			goto err_rpm;

		dma_fence_work_chain(&work->base, moving);

		/* Allocate enough page directories to cover the used PTEs */
		if (vma->vm->allocate_va_range) {
			err = i915_vm_alloc_pt_stash(vma->vm,
						     &work->stash,
						     vma->size);
			if (err)
				goto err_fence;

			err = i915_vm_map_pt_stash(vma->vm, &work->stash);
			if (err)
				goto err_fence;
		}
	}

	vma_res = i915_vma_resource_alloc();
	if (IS_ERR(vma_res)) {
		err = PTR_ERR(vma_res);
		goto err_fence;
	}

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them from the mmu_notifier callbacks -
	 * but they are allowed to be part of the user ppGTT which can never
	 * be mapped. As such we try to give the distinct users of the same
	 * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
	 * and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_vma_res;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		if (!(flags & PIN_VALIDATE))
			__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, ww, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj->cache_level,
			    flags, work, vma_res);
	vma_res = NULL;
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	if (!(flags & PIN_VALIDATE)) {
		__i915_vma_pin(vma);
		GEM_BUG_ON(!i915_vma_is_pinned(vma));
	}
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_vma_res:
	i915_vma_resource_free(vma_res);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
err_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);

	if (moving)
		dma_fence_put(moving);

	i915_vma_put_pages(vma);
	return err;
}

static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
			   u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	do {
		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);

		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			/*
			 * We pass NULL ww here, as we don't want to unbind
			 * locked objects when called from execbuf when pinning
			 * is removed. This would probably regress badly.
			 */
			i915_gem_evict_vm(vm, NULL);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags)
{
	struct i915_gem_ww_ctx _ww;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	if (ww)
		return __i915_ggtt_pin(vma, ww, align, flags);

	lockdep_assert_not_held(&vma->obj->base.resv->lock.base);

	for_i915_gem_ww(&_ww, err, true) {
		err = i915_gem_object_lock(vma->obj, &_ww);
		if (!err)
			err = __i915_ggtt_pin(vma, &_ww, align, flags);
	}

	return err;
}

static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	GEM_BUG_ON(i915_vma_is_closed(vma));
	list_add(&vma->closed_link, &gt->closed_vma);
}

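/*
 * i915_vma_close - drop an open reference on the vma
 *
 * When the last open reference is dropped, a non-GGTT vma is moved onto the
 * gt's closed_vma list so that it can be unbound and destroyed at the next
 * idle point (see i915_vma_parked()), rather than torn down immediately.
 */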
void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	if (i915_vma_is_ggtt(vma))
		return;

	GEM_BUG_ON(!atomic_read(&vma->open_count));
	if (atomic_dec_and_lock_irqsave(&vma->open_count,
					&gt->closed_lock,
					flags)) {
		__vma_close(vma, gt);
		spin_unlock_irqrestore(&gt->closed_lock, flags);
	}
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	list_del_init(&vma->closed_link);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
	spin_unlock_irq(&gt->closed_lock);
}

static void force_unbind(struct i915_vma *vma)
{
	if (!drm_mm_node_allocated(&vma->node))
		return;

	atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
	WARN_ON(__i915_vma_unbind(vma));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
}

static void release_references(struct i915_vma *vma, struct intel_gt *gt,
			       bool vm_ddestroy)
{
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(i915_vma_is_active(vma));

	spin_lock(&obj->vma.lock);
	list_del(&vma->obj_link);
	if (!RB_EMPTY_NODE(&vma->obj_node))
		rb_erase(&vma->obj_node, &obj->vma.tree);

	spin_unlock(&obj->vma.lock);

	spin_lock_irq(&gt->closed_lock);
	__i915_vma_remove_closed(vma);
	spin_unlock_irq(&gt->closed_lock);

	if (vm_ddestroy)
		i915_vm_resv_put(vma->vm);

	i915_active_fini(&vma->active);
	GEM_WARN_ON(vma->resource);
	i915_vma_free(vma);
}

/**
 * i915_vma_destroy_locked - Remove all weak references to the vma and put
 * the initial reference.
 *
 * This function should be called when it's decided the vma isn't needed
 * anymore. The caller must assure that it doesn't race with another lookup
 * plus destroy, typically by taking an appropriate reference.
 *
 * Current callsites are
 * - __i915_gem_object_pages_fini()
 * - __i915_vm_close() - Blocks the above function by taking a reference on
 * the object.
 * - __i915_vma_parked() - Blocks the above functions by taking a reference
 * on the vm and a reference on the object. Also takes the object lock so
 * destruction from __i915_vma_parked() can be blocked by holding the
 * object lock. Since the object lock is only allowed from within i915 with
 * an object refcount, holding the object lock also implicitly blocks the
 * vma freeing from __i915_gem_object_pages_fini().
 *
 * Because of locks taken during destruction, a vma is also guaranteed to
 * stay alive while the following locks are held if it was looked up while
 * holding one of the locks:
 * - vm->mutex
 * - obj->vma.lock
 * - gt->closed_lock
 */
void i915_vma_destroy_locked(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->mutex);

	force_unbind(vma);
	list_del_init(&vma->vm_link);
	release_references(vma, vma->vm->gt, false);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	struct intel_gt *gt;
	bool vm_ddestroy;

	mutex_lock(&vma->vm->mutex);
	force_unbind(vma);
	list_del_init(&vma->vm_link);
	vm_ddestroy = vma->vm_ddestroy;
	vma->vm_ddestroy = false;

	/* vma->vm may be freed when releasing vma->vm->mutex. */
	gt = vma->vm->gt;
	mutex_unlock(&vma->vm->mutex);
	release_references(vma, gt, vm_ddestroy);
}

void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryget(vm)) {
			i915_gem_object_put(obj);
			continue;
		}

		list_move(&vma->closed_link, &closed);
	}
	spin_unlock_irq(&gt->closed_lock);

	/* As the GT is held idle, no vma can be reopened as we destroy them */
	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		if (i915_gem_object_trylock(obj, NULL)) {
			INIT_LIST_HEAD(&vma->closed_link);
			i915_vma_destroy(vma);
			i915_gem_object_unlock(obj);
		} else {
			/* back you go.. */
			spin_lock_irq(&gt->closed_lock);
			list_add(&vma->closed_link, &gt->closed_vma);
			spin_unlock_irq(&gt->closed_lock);
		}

		i915_gem_object_put(obj);
		i915_vm_put(vm);
	}
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	if (page_unmask_bits(vma->iomap))
		__i915_gem_object_release_map(vma->obj);
	else
		io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{
	return __i915_request_await_exclusive(rq, &vma->active);
}

static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	/* Wait for the vma to be bound before we start! */
	err = __i915_request_await_bind(rq, vma);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}

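/*
 * _i915_vma_move_to_active - add the vma to the request's active tracking
 *
 * Waits for the bind to complete, reserves dma-resv fence slots if needed,
 * adds the request fence(s) to the object's reservation object and tracks
 * frontbuffer writes for display, marking the object dirty in the process.
 */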
int _i915_vma_move_to_active(struct i915_vma *vma,
			     struct i915_request *rq,
			     struct dma_fence *fence,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	GEM_BUG_ON(!vma->pages);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	/*
	 * Reserve fence slots early to prevent an allocation after preparing
	 * the workload and associating fences with dma_resv.
	 */
	if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
		struct dma_fence *curr;
		int idx;

		dma_fence_array_for_each(curr, idx, fence)
			;
		err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
		if (unlikely(err))
			return err;
	}

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}
	}

	if (fence) {
		struct dma_fence *curr;
		enum dma_resv_usage usage;
		int idx;

		if (flags & EXEC_OBJECT_WRITE) {
			usage = DMA_RESV_USAGE_WRITE;
			obj->write_domain = I915_GEM_DOMAIN_RENDER;
			obj->read_domains = 0;
		} else {
			usage = DMA_RESV_USAGE_READ;
			obj->write_domain = 0;
		}

		dma_fence_array_for_each(curr, idx, fence)
			dma_resv_add_fence(vma->obj->base.resv, curr, usage);
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}

struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
{
	struct i915_vma_resource *vma_res = vma->resource;
	struct dma_fence *unbind_fence;

	GEM_BUG_ON(i915_vma_is_pinned(vma));
	assert_vma_held_evict(vma);

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);

		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}

	__i915_vma_iounmap(vma);

	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	/* Object backend must be async capable. */
	GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);

	/* If vm is not open, unbind is a nop. */
	vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
		kref_read(&vma->vm->ref);
	vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
		vma->vm->skip_pte_rewrite;
	trace_i915_vma_unbind(vma);

	if (async)
		unbind_fence = i915_vma_resource_unbind(vma_res,
							&vma->obj->mm.tlb);
	else
		unbind_fence = i915_vma_resource_unbind(vma_res, NULL);

	vma->resource = NULL;

	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);

	if (!async) {
		if (unbind_fence) {
			dma_fence_wait(unbind_fence, false);
			dma_fence_put(unbind_fence);
			unbind_fence = NULL;
		}
		vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
	}

	/*
	 * Binding itself may not have completed until the unbind fence signals,
	 * so don't drop the pages until that happens, unless the resource is
	 * async_capable.
	 */

	vma_unbind_pages(vma);
	return unbind_fence;
}

int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	assert_vma_held_evict(vma);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	__i915_vma_evict(vma, false);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}

static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
{
	struct dma_fence *fence;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return NULL;

	if (i915_vma_is_pinned(vma) ||
	    &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
		return ERR_PTR(-EAGAIN);

	/*
	 * We probably need to replace this with awaiting the fences of the
	 * object's dma_resv when the vma active goes away. When doing that
	 * we need to be careful to not add the vma_resource unbind fence
	 * immediately to the object's dma_resv, because then unbinding
	 * the next vma from the object, in case there are many, will
	 * actually await the unbinding of the previous vmas, which is
	 * undesirable.
	 */
	if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
				       I915_ACTIVE_AWAIT_EXCL |
				       I915_ACTIVE_AWAIT_ACTIVE) < 0) {
		return ERR_PTR(-EBUSY);
	}

	fence = __i915_vma_evict(vma, true);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */

	return fence;
}

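/*
 * i915_vma_unbind - synchronously unbind the vma from its address space
 *
 * Waits for any outstanding activity, then takes vm->mutex (and a runtime PM
 * wakeref when a GGTT binding has to be torn down) and evicts the vma.
 * Returns 0 on success, or -EAGAIN if the vma is still pinned.
 */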
int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	assert_object_held_shared(vma->obj);

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	struct dma_fence *fence;
	int err;

	/*
	 * We need the dma-resv lock since we add the
	 * unbind fence to the dma-resv object.
	 */
	assert_object_held(obj);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (!obj->mm.rsgt)
		return -EBUSY;

	err = dma_resv_reserve_fences(obj->base.resv, 1);
	if (err)
		return -EBUSY;

	/*
	 * It would be great if we could grab this wakeref from the
	 * async unbind work if needed, but we can't because it uses
	 * kmalloc and it's in the dma-fence signalling critical path.
2120 */ 2121 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) 2122 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); 2123 2124 if (trylock_vm && !mutex_trylock(&vm->mutex)) { 2125 err = -EBUSY; 2126 goto out_rpm; 2127 } else if (!trylock_vm) { 2128 err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref); 2129 if (err) 2130 goto out_rpm; 2131 } 2132 2133 fence = __i915_vma_unbind_async(vma); 2134 mutex_unlock(&vm->mutex); 2135 if (IS_ERR_OR_NULL(fence)) { 2136 err = PTR_ERR_OR_ZERO(fence); 2137 goto out_rpm; 2138 } 2139 2140 dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ); 2141 dma_fence_put(fence); 2142 2143 out_rpm: 2144 if (wakeref) 2145 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); 2146 return err; 2147 } 2148 2149 int i915_vma_unbind_unlocked(struct i915_vma *vma) 2150 { 2151 int err; 2152 2153 i915_gem_object_lock(vma->obj, NULL); 2154 err = i915_vma_unbind(vma); 2155 i915_gem_object_unlock(vma->obj); 2156 2157 return err; 2158 } 2159 2160 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma) 2161 { 2162 i915_gem_object_make_unshrinkable(vma->obj); 2163 return vma; 2164 } 2165 2166 void i915_vma_make_shrinkable(struct i915_vma *vma) 2167 { 2168 i915_gem_object_make_shrinkable(vma->obj); 2169 } 2170 2171 void i915_vma_make_purgeable(struct i915_vma *vma) 2172 { 2173 i915_gem_object_make_purgeable(vma->obj); 2174 } 2175 2176 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 2177 #include "selftests/i915_vma.c" 2178 #endif 2179 2180 void i915_vma_module_exit(void) 2181 { 2182 kmem_cache_destroy(slab_vmas); 2183 } 2184 2185 int __init i915_vma_module_init(void) 2186 { 2187 slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); 2188 if (!slab_vmas) 2189 return -ENOMEM; 2190 2191 return 0; 2192 } 2193