/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static void obj_bump_mru(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	spin_lock(&i915->mm.obj_lock);
	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &i915->mm.bound_list);
	spin_unlock(&i915->mm.obj_lock);

	obj->mm.dirty = true; /* be paranoid */
}
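
/*
 * i915_active retirement callback for a vma: called once the last active
 * reference tracked by vma->active has been retired. When the owning object
 * goes wholly idle we opportunistically prune its reservation object, bump
 * it towards the tail of the bound list (rough LRU ordering) and drop any
 * deferred active reference held on the object.
 */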
static void __i915_vma_retire(struct i915_active *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), active);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/*
	 * Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	obj_bump_mru(obj);

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}
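
/*
 * Allocate and initialise a new vma for (obj, vm, view). The vma size and
 * fence parameters are derived from the object and, for GGTT views, from
 * the view description, before the vma is inserted into the object's vma
 * rb-tree and list. If another thread raced us and already inserted a
 * matching vma, the new allocation is freed and the existing instance is
 * returned instead.
 */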
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
	INIT_ACTIVE_REQUEST(&vma->last_fence);

	vma->vm = vm;
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
	}

	spin_lock(&obj->vma.lock);

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	mutex_lock(&vm->mutex);
	list_add(&vma->vm_link, &vm->unbound_list);
	mutex_unlock(&vm->mutex);

	return vma;

err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
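
/*
 * Illustrative usage sketch for i915_vma_instance() (assumes a caller that
 * already holds struct_mutex and wants a global GTT binding):
 *
 *	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *
 * i915_vma_pin() (see i915_vma.h) funnels into __i915_vma_do_pin() below,
 * which inserts and binds the vma on first use.
 */
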
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (!i915_vma_has_ggtt_write(vma))
		return;

	i915_gem_flush_ggtt_writes(vma->vm->i915);

	i915_vma_unset_ggtt_write(vma);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}
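
/*
 * Illustrative pairing for the iomap helpers above: a caller needing CPU
 * access through the mappable aperture would typically do
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *
 *	...read/write through ptr...
 *
 *	i915_vma_unpin_iomap(vma);
 *
 * with struct_mutex held and a runtime-pm wakeref active, matching the
 * assertions in i915_vma_pin_iomap().
 */
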
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	__i915_gem_object_release_unless_active(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <=
		   i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	unsigned int cache_level;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	if (vma->obj) {
		ret = i915_gem_object_pin_pages(vma->obj);
		if (ret)
			return ret;

		cache_level = vma->obj->cache_level;
	} else {
		cache_level = 0;
	}

	GEM_BUG_ON(vma->pages);

	ret = vma->ops->set_pages(vma);
	if (ret)
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_clear;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, cache_level,
					   flags);
		if (ret)
			goto err_clear;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume the GGTT is limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, cache_level,
					  start, end, flags);
		if (ret)
			goto err_clear;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));

	mutex_lock(&vma->vm->mutex);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&dev_priv->mm.obj_lock);
		list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
		obj->bind_count++;
		spin_unlock(&dev_priv->mm.obj_lock);

		assert_bind_count(obj);
	}

	return 0;

err_clear:
	vma->ops->clear_pages(vma);
err_unpin:
	if (vma->obj)
		i915_gem_object_unpin_pages(vma->obj);
	return ret;
}

static void
i915_vma_remove(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	vma->ops->clear_pages(vma);

	mutex_lock(&vma->vm->mutex);
	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
	mutex_unlock(&vma->vm->mutex);

	/*
	 * Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&i915->mm.obj_lock);
		if (--obj->bind_count == 0)
			list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
		spin_unlock(&i915->mm.obj_lock);

		/*
		 * And finally now the object is completely decoupled from this
		 * vma, we can drop its hold on the backing storage and allow
		 * it to be reaped by the shrinker.
		 */
		i915_gem_object_unpin_pages(obj);
		assert_bind_count(obj);
	}
}
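
/*
 * Pin flow: if the vma has no binding yet, find it a slot in its address
 * space via i915_vma_insert(), then (re)write the PTEs via i915_vma_bind().
 * A freshly inserted node is removed again if the bind fails, and the pin
 * count taken by the caller is dropped on any error.
 */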
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
	if (ret)
		goto err_remove;

	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		i915_vma_remove(vma);
		GEM_BUG_ON(vma->pages);
		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

void i915_vma_close(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (vma->flags & I915_VMA_CLOSED) {
		vma->flags &= ~I915_VMA_CLOSED;
		list_del(&vma->closed_link);
	}
}

static void __i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(vma->fence);

	GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));

	mutex_lock(&vma->vm->mutex);
	list_del(&vma->vm_link);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &vma->obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	i915_active_fini(&vma->active);

	i915_vma_free(vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (i915_vma_is_closed(vma))
		list_del(&vma->closed_link);

	WARN_ON(i915_vma_unbind(vma));
	__i915_vma_destroy(vma);
}

void i915_vma_parked(struct drm_i915_private *i915)
{
	struct i915_vma *vma, *next;

	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
		GEM_BUG_ON(!i915_vma_is_closed(vma));
		i915_vma_destroy(vma);
	}

	GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}
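
/*
 * Zap any CPU mappings of this vma's GGTT range so that the next user access
 * takes a fresh pagefault, allowing the fault handler to re-establish the
 * mapping and its domain/fence tracking.
 */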
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

static void export_fence(struct i915_vma *vma,
			 struct i915_request *rq,
			 unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &rq->fence);
	else if (reservation_object_reserve_shared(resv, 1) == 0)
		reservation_object_add_shared_fence(resv, &rq->fence);
	reservation_object_unlock(resv);
}

int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!vma->active.count)
		obj->active_count++;

	if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
		if (!vma->active.count)
			obj->active_count--;
		return -ENOMEM;
	}

	GEM_BUG_ON(!i915_vma_is_active(vma));
	GEM_BUG_ON(!obj->active_count);

	obj->write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			__i915_active_request_set(&obj->frontbuffer_write, rq);

		obj->read_domains = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		__i915_active_request_set(&vma->last_fence, rq);

	export_fence(vma, rq, flags);
	return 0;
}
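
/*
 * Unbind sequence: wait for any outstanding activity (pinning ourselves so
 * that a closed vma is not recursively unbound beneath us), refuse if the
 * vma is still pinned, flush GGTT writes and release the fence for aperture
 * mappings, revoke any userspace mmaps, and finally tear down the PTEs and
 * remove the node from the address space.
 */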
int i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	might_sleep();
	if (i915_vma_is_active(vma)) {
		/*
		 * When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		ret = i915_active_wait(&vma->active);
		if (ret)
			goto unpin;

		ret = i915_active_request_retire(&vma->last_fence,
						 &vma->vm->i915->drm.struct_mutex);
unpin:
		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EBUSY;
	}

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	i915_vma_remove(vma);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}