/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
#include "i915_vma_resource.h"

static struct kmem_cache *slab_vmas;

static struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
}

static void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}
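/*
 * Editor's note (illustrative): the two callbacks above tie the vma's
 * lifetime to GPU activity. Taking the first active reference grabs a
 * vma reference (failing with -ENOENT if the vma is already being
 * freed), and retiring the last one drops it again, so an in-flight
 * vma cannot disappear under the hardware.
 */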
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	spin_lock(&obj->vma.lock);

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
err_vma:
	i915_vm_put(vm);
	i915_vma_free(vma);
	return pos;
}
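/*
 * Reader's note (illustrative): i915_vma_compare() defines a total
 * order over (vm, view) keys, so the descent below matches the
 * insertion walk in vma_create() exactly: cmp < 0 descends right,
 * cmp > 0 descends left, and cmp == 0 is a match.
 */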
static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = i915_vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
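/*
 * Example (illustrative, not built): a typical caller resolves the
 * singleton and then pins it, e.g.
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	err = i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE);
 *
 * Repeated calls with the same (obj, vm, view) triplet return the same
 * vma until the object is freed or the address space is closed.
 */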
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma_resource *vma_res;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static void __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma_resource *vma_res = vw->vma_res;

	vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
			       vma_res, vw->cache_level, vw->flags);
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned)
		i915_gem_object_put(vw->pinned);

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	i915_vm_put(vw->vm);
	if (vw->vma_res)
		i915_vma_resource_put(vw->vma_res);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, true);
			dma_fence_put(fence);
		}
	}

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
{
	struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
	int err;

	if (!fence)
		return 0;

	if (dma_fence_is_signaled(fence))
		err = fence->error;
	else
		err = -EBUSY;

	dma_fence_put(fence);

	return err;
}
#else
#define i915_vma_verify_bind_complete(_vma) 0
#endif

I915_SELFTEST_EXPORT void
i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
				struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
			       obj->mm.rsgt, i915_gem_object_is_readonly(obj),
			       i915_gem_object_is_lmem(obj), obj->mm.region,
			       vma->ops, vma->private, vma->node.start,
			       vma->node.size, vma->size);
}
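/*
 * Example flow (illustrative): callers that might need an async bind
 * preallocate the worker and let i915_vma_bind() decide whether to use
 * it. Committing an unused worker is harmless since its error is still
 * -EAGAIN from i915_vma_work() above:
 *
 *	work = i915_vma_work();
 *	if (!work)
 *		return -ENOMEM;
 *	err = i915_vma_bind(vma, cache_level, flags, work, vma_res);
 *	dma_fence_work_commit_imm(&work->base);
 */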
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 * @vma_res: pointer to a preallocated vma resource. The resource is either
 * consumed or freed.
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work,
		  struct i915_vma_resource *vma_res)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total))) {
		i915_vma_resource_free(vma_res);
		return -ENODEV;
	}

	if (GEM_DEBUG_WARN_ON(!flags)) {
		i915_vma_resource_free(vma_res);
		return -EINVAL;
	}

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0) {
		i915_vma_resource_free(vma_res);
		return 0;
	}

	GEM_BUG_ON(!atomic_read(&vma->pages_count));

	/* Wait for or await async unbinds touching our range */
	if (work && bind_flags & vma->vm->bind_async_flags)
		ret = i915_vma_resource_bind_dep_await(vma->vm,
						       &work->base.chain,
						       vma->node.start,
						       vma->node.size,
						       true,
						       GFP_NOWAIT |
						       __GFP_RETRY_MAYFAIL |
						       __GFP_NOWARN);
	else
		ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
						      vma->node.size, true);
	if (ret) {
		i915_vma_resource_free(vma_res);
		return ret;
	}

	if (vma->resource || !vma_res) {
		/* Rebinding with an additional I915_VMA_*_BIND */
		GEM_WARN_ON(!vma_flags);
		kfree(vma_res);
	} else {
		i915_vma_resource_init_from_vma(vma_res, vma);
		vma->resource = vma_res;
	}
	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma_res = i915_vma_resource_get(vma->resource);
		work->cache_level = cache_level;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */

		/*
		 * If we don't have the refcounted pages list, keep a reference
		 * on the object to avoid waiting for the async bind to
		 * complete in the object destruction path.
		 */
		if (!work->vma_res->bi.pages_rsgt)
			work->pinned = i915_gem_object_get(vma->obj);
	} else {
		if (vma->obj) {
			int ret;

			ret = i915_gem_object_wait_moving_fence(vma->obj, true);
			if (ret) {
				i915_vma_resource_free(vma->resource);
				vma->resource = NULL;

				return ret;
			}
		}
		vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
				   bind_flags);
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}
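/*
 * Worked example (illustrative): calling i915_vma_bind() with
 * flags = I915_VMA_LOCAL_BIND on a vma that is already locally bound
 * leaves bind_flags == 0 above, so the preallocated vma_res is freed
 * and the call returns 0 without touching any PTEs.
 */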
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (!i915_gem_object_is_lmem(vma->obj)) {
		if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
			err = -ENODEV;
			goto err;
		}
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		/*
		 * TODO: consider just using i915_gem_object_pin_map() for lmem
		 * instead, which already supports mapping non-contiguous chunks
		 * of pages, that way we can also drop the
		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
		 */
		if (i915_gem_object_is_lmem(vma->obj))
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
		else
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
						vma->node.start,
						vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}
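/*
 * Illustrative pairing (assumed caller): every successful
 * i915_vma_pin_iomap() must be balanced by i915_vma_unpin_iomap()
 * while the device is kept awake, e.g.
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memset_io(ptr, 0, vma->size);
 *	i915_vma_unpin_iomap(vma);
 */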
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}
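/*
 * Worked example (illustrative): with cache coloring enabled, a snooped
 * node butted directly against an uncached neighbour fails the checks
 * above; a hole must separate the two so the prefetcher never walks
 * across the colour boundary.
 */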
/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/*
	 * If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that the GGTT is limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}
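/*
 * Worked example (illustrative): for a 64K-page object in a 48b ppGTT,
 * page_alignment above resolves to 2M, so a 3MiB vma is aligned to 2M
 * and its size rounded up to 4MiB, guaranteeing that 64K and 4K PTEs
 * never share a 2M page-table block.
 */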
static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_del(&vma->vm_link);
}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;

	bound = atomic_read(&vma->flags);

	if (flags & PIN_VALIDATE) {
		flags &= I915_VMA_BIND_MASK;

		return (flags & bound) == flags;
	}

	/* with the lock mandatory for unbind, we don't race here */
	flags &= I915_VMA_BIND_MASK;
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;
}

static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int src_stride, unsigned int dst_stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		unsigned int left;

		src_idx = src_stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= src_stride;
		}

		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}
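/*
 * Illustrative reading of the walk above: the destination list is built
 * column by column while the source index starts at the bottom row and
 * steps upwards by src_stride, which is what turns a row-major source
 * into a rotated view for the display engine.
 */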
static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].src_stride,
				  rot_info->plane[i].dst_stride,
				  st, sg);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj,
	    unsigned int offset, unsigned int alignment_pad,
	    unsigned int width, unsigned int height,
	    unsigned int src_stride, unsigned int dst_stride,
	    struct sg_table *st, struct scatterlist *sg)
{
	unsigned int row;

	if (!width || !height)
		return sg;

	if (alignment_pad) {
		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, alignment_pad * 4096, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = alignment_pad * 4096;
		sg = sg_next(sg);
	}

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;

		while (left) {
			dma_addr_t addr;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);

			length = min(left, length);

			st->nents++;

			sg_set_page(sg, NULL, length, 0);
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = length;
			sg = sg_next(sg);

			offset += length / I915_GTT_PAGE_SIZE;
			left -= length;
		}

		offset += src_stride - width;

		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}
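/*
 * Worked example (illustrative): remapping a plane with width = 2 and
 * dst_stride = 3 emits two real pages per row followed by one
 * (3 - 2) * I915_GTT_PAGE_SIZE padding entry, which the display engine
 * skips over.
 */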
static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
		  struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_remapped_info_size(rem_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int gtt_offset = 0;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int alignment_pad = 0;

		if (rem_info->plane_alignment)
			alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset;

		sg = remap_pages(obj,
				 rem_info->plane[i].offset, alignment_pad,
				 rem_info->plane[i].width, rem_info->plane[i].height,
				 rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
				 st, sg);

		gtt_offset += alignment_pad +
			      rem_info->plane[i].dst_stride * rem_info->plane[i].height;
	}

	i915_sg_trim(st);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rem_info->plane[0].width,
		rem_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg, *iter;
	unsigned int count = view->partial.size;
	unsigned int offset;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
	GEM_BUG_ON(!iter);

	sg = st->sgl;
	st->nents = 0;
	do {
		unsigned int len;

		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0) {
			sg_mark_end(sg);
			i915_sg_trim(st); /* Drop any unused tail entries. */

			return st;
		}

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}
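/*
 * Illustrative example: a partial view with partial.offset = 1 and
 * partial.size = 2 maps exactly pages [1, 2] of the object, which is
 * how huge objects are faulted through the limited mappable aperture
 * one window at a time.
 */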
static int
__i915_vma_get_pages(struct i915_vma *vma)
{
	struct sg_table *pages;

	/*
	 * The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->ggtt_view.type) {
	default:
		GEM_BUG_ON(vma->ggtt_view.type);
		fallthrough;
	case I915_GGTT_VIEW_NORMAL:
		pages = vma->obj->mm.pages;
		break;

	case I915_GGTT_VIEW_ROTATED:
		pages =
			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
		break;

	case I915_GGTT_VIEW_REMAPPED:
		pages =
			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
		break;

	case I915_GGTT_VIEW_PARTIAL:
		pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
		break;
	}

	if (IS_ERR(pages)) {
		drm_err(&vma->vm->i915->drm,
			"Failed to get pages for VMA view type %u (%ld)!\n",
			vma->ggtt_view.type, PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	vma->pages = pages;

	return 0;
}

I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
{
	int err;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	err = i915_gem_object_pin_pages(vma->obj);
	if (err)
		return err;

	err = __i915_vma_get_pages(vma);
	if (err)
		goto err_unpin;

	vma->page_sizes = vma->obj->mm.page_sizes;
	atomic_inc(&vma->pages_count);

	return 0;

err_unpin:
	__i915_gem_object_unpin_pages(vma->obj);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);

	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		if (vma->pages != vma->obj->mm.pages) {
			sg_free_table(vma->pages);
			kfree(vma->pages);
		}
		vma->pages = NULL;

		i915_gem_object_unpin_pages(vma->obj);
	}
}

I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}
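/*
 * Worked example (illustrative): pages_count packs two counters, page
 * references in the low bits and bindings above I915_VMA_PAGES_BIAS.
 * With two bindings outstanding, vma_unbind_pages() above passes
 * 2 | 2 << I915_VMA_PAGES_BIAS, dropping both binding references and
 * their paired page references in a single subtraction.
 */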
int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	struct dma_fence *moving = NULL;
	struct i915_vma_resource *vma_res = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

	assert_vma_held(vma);
	GEM_BUG_ON(!ww);

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags))
		return 0;

	err = i915_vma_get_pages(vma);
	if (err)
		return err;

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
	if (flags & vma->vm->bind_async_flags || moving) {
		/* lock VM */
		err = i915_vm_lock_objects(vma->vm, ww);
		if (err)
			goto err_rpm;

		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_rpm;
		}

		work->vm = i915_vm_get(vma->vm);

		dma_fence_work_chain(&work->base, moving);

		/* Allocate enough page directories to cover the used PTEs */
		if (vma->vm->allocate_va_range) {
			err = i915_vm_alloc_pt_stash(vma->vm,
						     &work->stash,
						     vma->size);
			if (err)
				goto err_fence;

			err = i915_vm_map_pt_stash(vma->vm, &work->stash);
			if (err)
				goto err_fence;
		}
	}

	vma_res = i915_vma_resource_alloc();
	if (IS_ERR(vma_res)) {
		err = PTR_ERR(vma_res);
		goto err_fence;
	}

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them in the mmu_notifier
	 * callbacks - but they are allowed to be part of the user ppGTT
	 * which can never be mapped. As such we try to give the distinct
	 * users of the same mutex, distinct lockclasses [equivalent to how
	 * we keep i915_ggtt and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_vma_res;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		if (!(flags & PIN_VALIDATE))
			__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, ww, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj->cache_level,
			    flags, work, vma_res);
	vma_res = NULL;
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	if (!(flags & PIN_VALIDATE)) {
		__i915_vma_pin(vma);
		GEM_BUG_ON(!i915_vma_is_pinned(vma));
	}
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_vma_res:
	kfree(vma_res);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
err_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);

	if (moving)
		dma_fence_put(moving);

	i915_vma_put_pages(vma);
	return err;
}
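/*
 * Illustrative caller (assumed, not part of this file): pinning under a
 * ww transaction retries on -EDEADLK like any other ww-locked path, the
 * same shape i915_ggtt_pin() below uses internally:
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_gem_object_lock(obj, &ww);
 *		if (!err)
 *			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
 *	}
 */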
static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
			   u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	do {
		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);

		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			/*
			 * We pass NULL ww here, as we don't want to unbind
			 * locked objects when called from execbuf when pinning
			 * is removed. This would probably regress badly.
			 */
			i915_gem_evict_vm(vm, NULL);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags)
{
	struct i915_gem_ww_ctx _ww;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	if (ww)
		return __i915_ggtt_pin(vma, ww, align, flags);

#ifdef CONFIG_LOCKDEP
	WARN_ON(dma_resv_held(vma->obj->base.resv));
#endif

	for_i915_gem_ww(&_ww, err, true) {
		err = i915_gem_object_lock(vma->obj, &_ww);
		if (!err)
			err = __i915_ggtt_pin(vma, &_ww, align, flags);
	}

	return err;
}

static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	GEM_BUG_ON(i915_vma_is_closed(vma));
	list_add(&vma->closed_link, &gt->closed_vma);
}
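/*
 * The deferred-close scheme above pairs with i915_vma_parked(): vmas
 * parked on gt->closed_vma are only finally destroyed once the GT
 * idles, and i915_vma_reopen() can still rescue them before then.
 */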
void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	if (i915_vma_is_ggtt(vma))
		return;

	GEM_BUG_ON(!atomic_read(&vma->open_count));
	if (atomic_dec_and_lock_irqsave(&vma->open_count,
					&gt->closed_lock,
					flags)) {
		__vma_close(vma, gt);
		spin_unlock_irqrestore(&gt->closed_lock, flags);
	}
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
}

void i915_vma_release(struct kref *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
	struct drm_i915_gem_object *obj = vma->obj;

	if (drm_mm_node_allocated(&vma->node)) {
		mutex_lock(&vma->vm->mutex);
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		mutex_unlock(&vma->vm->mutex);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	spin_lock(&obj->vma.lock);
	list_del(&vma->obj_link);
	if (!RB_EMPTY_NODE(&vma->obj_node))
		rb_erase(&vma->obj_node, &obj->vma.tree);
	spin_unlock(&obj->vma.lock);

	__i915_vma_remove_closed(vma);
	i915_vm_put(vma->vm);

	i915_active_fini(&vma->active);
	GEM_WARN_ON(vma->resource);
	i915_vma_free(vma);
}

void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryopen(vm)) {
			i915_gem_object_put(obj);
			continue;
		}

		list_move(&vma->closed_link, &closed);
	}
	spin_unlock_irq(&gt->closed_lock);

	/* As the GT is held idle, no vma can be reopened as we destroy them */
	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		if (i915_gem_object_trylock(obj, NULL)) {
			INIT_LIST_HEAD(&vma->closed_link);
			__i915_vma_put(vma);
			i915_gem_object_unlock(obj);
		} else {
			/* back you go.. */
			spin_lock_irq(&gt->closed_lock);
			list_add(&vma->closed_link, &gt->closed_vma);
			spin_unlock_irq(&gt->closed_lock);
		}

		i915_gem_object_put(obj);
		i915_vm_close(vm);
	}
}
static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{
	return __i915_request_await_exclusive(rq, &vma->active);
}

static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	/* Wait for the vma to be bound before we start! */
	err = __i915_request_await_bind(rq, vma);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}

int _i915_vma_move_to_active(struct i915_vma *vma,
			     struct i915_request *rq,
			     struct dma_fence *fence,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	GEM_BUG_ON(!vma->pages);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}

		if (fence) {
			dma_resv_add_excl_fence(vma->obj->base.resv, fence);
			obj->write_domain = I915_GEM_DOMAIN_RENDER;
			obj->read_domains = 0;
		}
	} else {
		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
			err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
			if (unlikely(err))
				return err;
		}

		if (fence) {
			dma_resv_add_shared_fence(vma->obj->base.resv, fence);
			obj->write_domain = 0;
		}
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}
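/*
 * Illustrative caller (assumed): submission paths tie a vma to the
 * request that uses it, so the vma cannot be unbound until the GPU is
 * done, e.g.
 *
 *	err = _i915_vma_move_to_active(vma, rq, &rq->fence,
 *				       EXEC_OBJECT_WRITE);
 */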
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
{
	struct i915_vma_resource *vma_res = vma->resource;
	struct dma_fence *unbind_fence;

	GEM_BUG_ON(i915_vma_is_pinned(vma));
	assert_object_held_shared(vma->obj);

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	/* Object backend must be async capable. */
	GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);

	/* If vm is not open, unbind is a nop. */
	vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
				 atomic_read(&vma->vm->open);
	trace_i915_vma_unbind(vma);

	unbind_fence = i915_vma_resource_unbind(vma_res);
	vma->resource = NULL;

	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);

	if (!async && unbind_fence) {
		dma_fence_wait(unbind_fence, false);
		dma_fence_put(unbind_fence);
		unbind_fence = NULL;
	}

	/*
	 * Binding itself may not have completed until the unbind fence signals,
	 * so don't drop the pages until that happens, unless the resource is
	 * async_capable.
	 */
	vma_unbind_pages(vma);
	return unbind_fence;
}

int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	assert_object_held_shared(vma->obj);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	__i915_vma_evict(vma, false);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}
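/*
 * Note the sync/async split: __i915_vma_unbind() above waits on the
 * unbind fence itself, while __i915_vma_unbind_async() below hands the
 * fence back so the caller can install it in the object's dma_resv
 * instead of blocking.
 */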
static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
{
	struct dma_fence *fence;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return NULL;

	if (i915_vma_is_pinned(vma) ||
	    &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
		return ERR_PTR(-EAGAIN);

	/*
	 * We probably need to replace this with awaiting the fences of the
	 * object's dma_resv when the vma active goes away. When doing that
	 * we need to be careful to not add the vma_resource unbind fence
	 * immediately to the object's dma_resv, because then unbinding
	 * the next vma from the object, in case there are many, will
	 * actually await the unbinding of the previous vmas, which is
	 * undesirable.
	 */
	if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
				       I915_ACTIVE_AWAIT_EXCL |
				       I915_ACTIVE_AWAIT_ACTIVE) < 0) {
		return ERR_PTR(-EBUSY);
	}

	fence = __i915_vma_evict(vma, true);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */

	return fence;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	assert_object_held_shared(vma->obj);

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}
int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	struct dma_fence *fence;
	int err;

	/*
	 * We need the dma-resv lock since we add the
	 * unbind fence to the dma-resv object.
	 */
	assert_object_held(obj);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (!obj->mm.rsgt)
		return -EBUSY;

	err = dma_resv_reserve_shared(obj->base.resv, 1);
	if (err)
		return -EBUSY;

	/*
	 * It would be great if we could grab this wakeref from the
	 * async unbind work if needed, but we can't because it uses
	 * kmalloc and it's in the dma-fence signalling critical path.
	 */
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	if (trylock_vm && !mutex_trylock(&vm->mutex)) {
		err = -EBUSY;
		goto out_rpm;
	} else if (!trylock_vm) {
		err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
		if (err)
			goto out_rpm;
	}

	fence = __i915_vma_unbind_async(vma);
	mutex_unlock(&vm->mutex);
	if (IS_ERR_OR_NULL(fence)) {
		err = PTR_ERR_OR_ZERO(fence);
		goto out_rpm;
	}

	dma_resv_add_shared_fence(obj->base.resv, fence);
	dma_fence_put(fence);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

int i915_vma_unbind_unlocked(struct i915_vma *vma)
{
	int err;

	i915_gem_object_lock(vma->obj, NULL);
	err = i915_vma_unbind(vma);
	i915_gem_object_unlock(vma->obj);

	return err;
}

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

void i915_vma_module_exit(void)
{
	kmem_cache_destroy(slab_vmas);
}

int __init i915_vma_module_init(void)
{
	slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!slab_vmas)
		return -ENOMEM;

	return 0;
}