/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_gt.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_vma.h"

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	i915_vma_get(active_to_vma(ref));
	return 0;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

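/*
 * vma_create - allocate and initialise a new vma for (obj, vm, view)
 *
 * The vma is sized according to the requested GGTT view, linked into the
 * object's vma rbtree and list, and added to the vm's unbound list. If
 * another thread raced us and already inserted a matching vma, the new
 * allocation is freed and the existing instance is returned instead.
 */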
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	vma->vm = vm;
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(vm->i915, &vma->active,
			 __i915_vma_active, __i915_vma_retire);
	INIT_ACTIVE_REQUEST(&vma->last_fence);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
	}

	spin_lock(&obj->vma.lock);

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	mutex_lock(&vm->mutex);
	list_add(&vma->vm_link, &vm->unbound_list);
	mutex_unlock(&vm->mutex);

	return vma;

err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

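/*
 * vma_lookup - find an existing vma for (obj, vm, view)
 *
 * Walks the object's vma rbtree, ordered by i915_vma_compare(), and returns
 * the matching vma or NULL. The caller is expected to hold obj->vma.lock.
 */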
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

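/*
 * i915_vma_pin_iomap - map the vma's GGTT range for CPU access through the
 * mappable aperture, returning an io-mapped (WC) pointer and leaving both
 * the vma and a fence pinned. Requires a runtime-pm wakeref and the
 * struct_mutex to be held. Illustrative usage (a sketch, not taken from a
 * particular caller):
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...write through ptr...
 *	i915_vma_unpin_iomap(vma);
 */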
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (!i915_vma_has_ggtt_write(vma))
		return;

	intel_gt_flush_ggtt_writes(vma->vm->gt);

	i915_vma_unset_ggtt_write(vma);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

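/*
 * i915_vma_misplaced - check whether the vma's current binding violates the
 * requested size, alignment or placement (PIN_*) constraints and would
 * therefore need to be rebound to satisfy them.
 */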
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

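/*
 * Recompute whether this GGTT vma is reachable through the mappable aperture
 * and can be covered by a fence register, updating I915_VMA_CAN_FENCE.
 */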
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	unsigned int cache_level;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	if (vma->obj) {
		ret = i915_gem_object_pin_pages(vma->obj);
		if (ret)
			return ret;

		cache_level = vma->obj->cache_level;
	} else {
		cache_level = 0;
	}

	GEM_BUG_ON(vma->pages);

	ret = vma->ops->set_pages(vma);
	if (ret)
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_clear;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, cache_level,
					   flags);
		if (ret)
			goto err_clear;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, cache_level,
					  start, end, flags);
		if (ret)
			goto err_clear;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));

	mutex_lock(&vma->vm->mutex);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		atomic_inc(&vma->obj->bind_count);
		assert_bind_count(vma->obj);
	}

	return 0;

err_clear:
	vma->ops->clear_pages(vma);
err_unpin:
	if (vma->obj)
		i915_gem_object_unpin_pages(vma->obj);
	return ret;
}

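/*
 * i915_vma_remove - undo i915_vma_insert()
 *
 * Releases the vma's page references, removes its node from the drm_mm and
 * moves it back to the vm's unbound list, dropping the object's bind count
 * and the corresponding pin on its backing pages.
 */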
static void
i915_vma_remove(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	vma->ops->clear_pages(vma);

	mutex_lock(&vma->vm->mutex);
	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
	mutex_unlock(&vma->vm->mutex);

	/*
	 * Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		atomic_dec(&obj->bind_count);

		/*
		 * And finally now the object is completely decoupled from this
		 * vma, we can drop its hold on the backing storage and allow
		 * it to be reaped by the shrinker.
		 */
		i915_gem_object_unpin_pages(obj);
		assert_bind_count(obj);
	}
}

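/*
 * __i915_vma_do_pin - slow path behind i915_vma_pin()
 *
 * Called with the pin count already raised; inserts the vma into its address
 * space if it is not yet bound, then binds it with the requested PIN_GLOBAL /
 * PIN_USER flags, unwinding both the insertion and the pin on failure.
 */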
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
	if (ret)
		goto err_remove;

	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		i915_vma_remove(vma);
		GEM_BUG_ON(vma->pages);
		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

void i915_vma_close(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned long flags;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	spin_lock_irqsave(&i915->gt.closed_lock, flags);
	list_add(&vma->closed_link, &i915->gt.closed_vma);
	spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	if (!i915_vma_is_closed(vma))
		return;

	spin_lock_irq(&i915->gt.closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&i915->gt.closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	__i915_vma_remove_closed(vma);
}

static void __i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(vma->fence);

	GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));

	mutex_lock(&vma->vm->mutex);
	list_del(&vma->vm_link);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &vma->obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	i915_active_fini(&vma->active);

	i915_vma_free(vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_pinned(vma));

	__i915_vma_remove_closed(vma);

	WARN_ON(i915_vma_unbind(vma));
	GEM_BUG_ON(i915_vma_is_active(vma));

	__i915_vma_destroy(vma);
}

void i915_vma_parked(struct drm_i915_private *i915)
{
	struct i915_vma *vma, *next;

	spin_lock_irq(&i915->gt.closed_lock);
	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
		list_del_init(&vma->closed_link);
		spin_unlock_irq(&i915->gt.closed_lock);

		i915_vma_destroy(vma);

		spin_lock_irq(&i915->gt.closed_lock);
	}
	spin_unlock_irq(&i915->gt.closed_lock);
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

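/*
 * Attach the request's fence to the vma's reservation object so that later
 * users (and eviction) will wait upon this work: as the exclusive fence for
 * writes, or as a shared fence for reads.
 */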
static void export_fence(struct i915_vma *vma,
			 struct i915_request *rq,
			 unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &rq->fence);
	else if (reservation_object_reserve_shared(resv, 1) == 0)
		reservation_object_add_shared_fence(resv, &rq->fence);
}

int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_vma_held(vma);
	assert_object_held(obj);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	err = i915_active_ref(&vma->active, rq->fence.context, rq);
	if (unlikely(err))
		return err;

	obj->write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			__i915_active_request_set(&obj->frontbuffer_write, rq);

		obj->read_domains = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		__i915_active_request_set(&vma->last_fence, rq);

	export_fence(vma, rq, flags);

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}

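/*
 * i915_vma_unbind - release the vma's slot in its address space
 *
 * Waits for any outstanding activity, flushes GGTT writes and revokes CPU
 * mmaps where necessary, then unbinds the PTEs and removes the node from the
 * drm_mm. Returns -EBUSY if the vma is still pinned.
 */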
int i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	might_sleep();
	if (i915_vma_is_active(vma)) {
		/*
		 * When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		ret = i915_active_wait(&vma->active);
		if (ret)
			goto unpin;

		ret = i915_active_request_retire(&vma->last_fence,
						 &vma->vm->i915->drm.struct_mutex);
unpin:
		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EBUSY;
	}

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	i915_vma_remove(vma);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}