1 /* 2 * Copyright © 2016 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 */ 24 25 #include "i915_vma.h" 26 27 #include "i915_drv.h" 28 #include "intel_ringbuffer.h" 29 #include "intel_frontbuffer.h" 30 31 #include <drm/drm_gem.h> 32 33 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM) 34 35 #include <linux/stackdepot.h> 36 37 static void vma_print_allocator(struct i915_vma *vma, const char *reason) 38 { 39 unsigned long entries[12]; 40 struct stack_trace trace = { 41 .entries = entries, 42 .max_entries = ARRAY_SIZE(entries), 43 }; 44 char buf[512]; 45 46 if (!vma->node.stack) { 47 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n", 48 vma->node.start, vma->node.size, reason); 49 return; 50 } 51 52 depot_fetch_stack(vma->node.stack, &trace); 53 snprint_stack_trace(buf, sizeof(buf), &trace, 0); 54 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n", 55 vma->node.start, vma->node.size, reason, buf); 56 } 57 58 #else 59 60 static void vma_print_allocator(struct i915_vma *vma, const char *reason) 61 { 62 } 63 64 #endif 65 66 static void obj_bump_mru(struct drm_i915_gem_object *obj) 67 { 68 struct drm_i915_private *i915 = to_i915(obj->base.dev); 69 70 spin_lock(&i915->mm.obj_lock); 71 if (obj->bind_count) 72 list_move_tail(&obj->mm.link, &i915->mm.bound_list); 73 spin_unlock(&i915->mm.obj_lock); 74 75 obj->mm.dirty = true; /* be paranoid */ 76 } 77 78 static void __i915_vma_retire(struct i915_active *ref) 79 { 80 struct i915_vma *vma = container_of(ref, typeof(*vma), active); 81 struct drm_i915_gem_object *obj = vma->obj; 82 83 GEM_BUG_ON(!i915_gem_object_is_active(obj)); 84 if (--obj->active_count) 85 return; 86 87 /* Prune the shared fence arrays iff completely idle (inc. external) */ 88 if (reservation_object_trylock(obj->resv)) { 89 if (reservation_object_test_signaled_rcu(obj->resv, true)) 90 reservation_object_add_excl_fence(obj->resv, NULL); 91 reservation_object_unlock(obj->resv); 92 } 93 94 /* 95 * Bump our place on the bound list to keep it roughly in LRU order 96 * so that we don't steal from recently used but inactive objects 97 * (unless we are forced to ofc!) 98 */ 99 obj_bump_mru(obj); 100 101 if (i915_gem_object_has_active_reference(obj)) { 102 i915_gem_object_clear_active_reference(obj); 103 i915_gem_object_put(obj); 104 } 105 } 106 107 static struct i915_vma * 108 vma_create(struct drm_i915_gem_object *obj, 109 struct i915_address_space *vm, 110 const struct i915_ggtt_view *view) 111 { 112 struct i915_vma *vma; 113 struct rb_node *rb, **p; 114 115 /* The aliasing_ppgtt should never be used directly! */ 116 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); 117 118 vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL); 119 if (vma == NULL) 120 return ERR_PTR(-ENOMEM); 121 122 i915_active_init(vm->i915, &vma->active, __i915_vma_retire); 123 INIT_ACTIVE_REQUEST(&vma->last_fence); 124 125 vma->vm = vm; 126 vma->ops = &vm->vma_ops; 127 vma->obj = obj; 128 vma->resv = obj->resv; 129 vma->size = obj->base.size; 130 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; 131 132 if (view && view->type != I915_GGTT_VIEW_NORMAL) { 133 vma->ggtt_view = *view; 134 if (view->type == I915_GGTT_VIEW_PARTIAL) { 135 GEM_BUG_ON(range_overflows_t(u64, 136 view->partial.offset, 137 view->partial.size, 138 obj->base.size >> PAGE_SHIFT)); 139 vma->size = view->partial.size; 140 vma->size <<= PAGE_SHIFT; 141 GEM_BUG_ON(vma->size > obj->base.size); 142 } else if (view->type == I915_GGTT_VIEW_ROTATED) { 143 vma->size = intel_rotation_info_size(&view->rotated); 144 vma->size <<= PAGE_SHIFT; 145 } 146 } 147 148 if (unlikely(vma->size > vm->total)) 149 goto err_vma; 150 151 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE)); 152 153 if (i915_is_ggtt(vm)) { 154 if (unlikely(overflows_type(vma->size, u32))) 155 goto err_vma; 156 157 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size, 158 i915_gem_object_get_tiling(obj), 159 i915_gem_object_get_stride(obj)); 160 if (unlikely(vma->fence_size < vma->size || /* overflow */ 161 vma->fence_size > vm->total)) 162 goto err_vma; 163 164 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT)); 165 166 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size, 167 i915_gem_object_get_tiling(obj), 168 i915_gem_object_get_stride(obj)); 169 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment)); 170 171 vma->flags |= I915_VMA_GGTT; 172 } 173 174 spin_lock(&obj->vma.lock); 175 176 rb = NULL; 177 p = &obj->vma.tree.rb_node; 178 while (*p) { 179 struct i915_vma *pos; 180 long cmp; 181 182 rb = *p; 183 pos = rb_entry(rb, struct i915_vma, obj_node); 184 185 /* 186 * If the view already exists in the tree, another thread 187 * already created a matching vma, so return the older instance 188 * and dispose of ours. 189 */ 190 cmp = i915_vma_compare(pos, vm, view); 191 if (cmp == 0) { 192 spin_unlock(&obj->vma.lock); 193 kmem_cache_free(vm->i915->vmas, vma); 194 return pos; 195 } 196 197 if (cmp < 0) 198 p = &rb->rb_right; 199 else 200 p = &rb->rb_left; 201 } 202 rb_link_node(&vma->obj_node, rb, p); 203 rb_insert_color(&vma->obj_node, &obj->vma.tree); 204 205 if (i915_vma_is_ggtt(vma)) 206 /* 207 * We put the GGTT vma at the start of the vma-list, followed 208 * by the ppGGTT vma. This allows us to break early when 209 * iterating over only the GGTT vma for an object, see 210 * for_each_ggtt_vma() 211 */ 212 list_add(&vma->obj_link, &obj->vma.list); 213 else 214 list_add_tail(&vma->obj_link, &obj->vma.list); 215 216 spin_unlock(&obj->vma.lock); 217 218 mutex_lock(&vm->mutex); 219 list_add(&vma->vm_link, &vm->unbound_list); 220 mutex_unlock(&vm->mutex); 221 222 return vma; 223 224 err_vma: 225 kmem_cache_free(vm->i915->vmas, vma); 226 return ERR_PTR(-E2BIG); 227 } 228 229 static struct i915_vma * 230 vma_lookup(struct drm_i915_gem_object *obj, 231 struct i915_address_space *vm, 232 const struct i915_ggtt_view *view) 233 { 234 struct rb_node *rb; 235 236 rb = obj->vma.tree.rb_node; 237 while (rb) { 238 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node); 239 long cmp; 240 241 cmp = i915_vma_compare(vma, vm, view); 242 if (cmp == 0) 243 return vma; 244 245 if (cmp < 0) 246 rb = rb->rb_right; 247 else 248 rb = rb->rb_left; 249 } 250 251 return NULL; 252 } 253 254 /** 255 * i915_vma_instance - return the singleton instance of the VMA 256 * @obj: parent &struct drm_i915_gem_object to be mapped 257 * @vm: address space in which the mapping is located 258 * @view: additional mapping requirements 259 * 260 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with 261 * the same @view characteristics. If a match is not found, one is created. 262 * Once created, the VMA is kept until either the object is freed, or the 263 * address space is closed. 264 * 265 * Must be called with struct_mutex held. 266 * 267 * Returns the vma, or an error pointer. 268 */ 269 struct i915_vma * 270 i915_vma_instance(struct drm_i915_gem_object *obj, 271 struct i915_address_space *vm, 272 const struct i915_ggtt_view *view) 273 { 274 struct i915_vma *vma; 275 276 GEM_BUG_ON(view && !i915_is_ggtt(vm)); 277 GEM_BUG_ON(vm->closed); 278 279 spin_lock(&obj->vma.lock); 280 vma = vma_lookup(obj, vm, view); 281 spin_unlock(&obj->vma.lock); 282 283 /* vma_create() will resolve the race if another creates the vma */ 284 if (unlikely(!vma)) 285 vma = vma_create(obj, vm, view); 286 287 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view)); 288 return vma; 289 } 290 291 /** 292 * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space. 293 * @vma: VMA to map 294 * @cache_level: mapping cache level 295 * @flags: flags like global or local mapping 296 * 297 * DMA addresses are taken from the scatter-gather table of this object (or of 298 * this VMA in case of non-default GGTT views) and PTE entries set up. 299 * Note that DMA addresses are also the only part of the SG table we care about. 300 */ 301 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 302 u32 flags) 303 { 304 u32 bind_flags; 305 u32 vma_flags; 306 int ret; 307 308 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 309 GEM_BUG_ON(vma->size > vma->node.size); 310 311 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start, 312 vma->node.size, 313 vma->vm->total))) 314 return -ENODEV; 315 316 if (GEM_DEBUG_WARN_ON(!flags)) 317 return -EINVAL; 318 319 bind_flags = 0; 320 if (flags & PIN_GLOBAL) 321 bind_flags |= I915_VMA_GLOBAL_BIND; 322 if (flags & PIN_USER) 323 bind_flags |= I915_VMA_LOCAL_BIND; 324 325 vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); 326 if (flags & PIN_UPDATE) 327 bind_flags |= vma_flags; 328 else 329 bind_flags &= ~vma_flags; 330 if (bind_flags == 0) 331 return 0; 332 333 GEM_BUG_ON(!vma->pages); 334 335 trace_i915_vma_bind(vma, bind_flags); 336 ret = vma->ops->bind_vma(vma, cache_level, bind_flags); 337 if (ret) 338 return ret; 339 340 vma->flags |= bind_flags; 341 return 0; 342 } 343 344 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) 345 { 346 void __iomem *ptr; 347 int err; 348 349 /* Access through the GTT requires the device to be awake. */ 350 assert_rpm_wakelock_held(vma->vm->i915); 351 352 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 353 if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { 354 err = -ENODEV; 355 goto err; 356 } 357 358 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); 359 GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0); 360 361 ptr = vma->iomap; 362 if (ptr == NULL) { 363 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap, 364 vma->node.start, 365 vma->node.size); 366 if (ptr == NULL) { 367 err = -ENOMEM; 368 goto err; 369 } 370 371 vma->iomap = ptr; 372 } 373 374 __i915_vma_pin(vma); 375 376 err = i915_vma_pin_fence(vma); 377 if (err) 378 goto err_unpin; 379 380 i915_vma_set_ggtt_write(vma); 381 return ptr; 382 383 err_unpin: 384 __i915_vma_unpin(vma); 385 err: 386 return IO_ERR_PTR(err); 387 } 388 389 void i915_vma_flush_writes(struct i915_vma *vma) 390 { 391 if (!i915_vma_has_ggtt_write(vma)) 392 return; 393 394 i915_gem_flush_ggtt_writes(vma->vm->i915); 395 396 i915_vma_unset_ggtt_write(vma); 397 } 398 399 void i915_vma_unpin_iomap(struct i915_vma *vma) 400 { 401 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 402 403 GEM_BUG_ON(vma->iomap == NULL); 404 405 i915_vma_flush_writes(vma); 406 407 i915_vma_unpin_fence(vma); 408 i915_vma_unpin(vma); 409 } 410 411 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags) 412 { 413 struct i915_vma *vma; 414 struct drm_i915_gem_object *obj; 415 416 vma = fetch_and_zero(p_vma); 417 if (!vma) 418 return; 419 420 obj = vma->obj; 421 GEM_BUG_ON(!obj); 422 423 i915_vma_unpin(vma); 424 i915_vma_close(vma); 425 426 if (flags & I915_VMA_RELEASE_MAP) 427 i915_gem_object_unpin_map(obj); 428 429 __i915_gem_object_release_unless_active(obj); 430 } 431 432 bool i915_vma_misplaced(const struct i915_vma *vma, 433 u64 size, u64 alignment, u64 flags) 434 { 435 if (!drm_mm_node_allocated(&vma->node)) 436 return false; 437 438 if (vma->node.size < size) 439 return true; 440 441 GEM_BUG_ON(alignment && !is_power_of_2(alignment)); 442 if (alignment && !IS_ALIGNED(vma->node.start, alignment)) 443 return true; 444 445 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma)) 446 return true; 447 448 if (flags & PIN_OFFSET_BIAS && 449 vma->node.start < (flags & PIN_OFFSET_MASK)) 450 return true; 451 452 if (flags & PIN_OFFSET_FIXED && 453 vma->node.start != (flags & PIN_OFFSET_MASK)) 454 return true; 455 456 return false; 457 } 458 459 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) 460 { 461 bool mappable, fenceable; 462 463 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); 464 GEM_BUG_ON(!vma->fence_size); 465 466 /* 467 * Explicitly disable for rotated VMA since the display does not 468 * need the fence and the VMA is not accessible to other users. 469 */ 470 if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED) 471 return; 472 473 fenceable = (vma->node.size >= vma->fence_size && 474 IS_ALIGNED(vma->node.start, vma->fence_alignment)); 475 476 mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end; 477 478 if (mappable && fenceable) 479 vma->flags |= I915_VMA_CAN_FENCE; 480 else 481 vma->flags &= ~I915_VMA_CAN_FENCE; 482 } 483 484 static bool color_differs(struct drm_mm_node *node, unsigned long color) 485 { 486 return node->allocated && node->color != color; 487 } 488 489 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level) 490 { 491 struct drm_mm_node *node = &vma->node; 492 struct drm_mm_node *other; 493 494 /* 495 * On some machines we have to be careful when putting differing types 496 * of snoopable memory together to avoid the prefetcher crossing memory 497 * domains and dying. During vm initialisation, we decide whether or not 498 * these constraints apply and set the drm_mm.color_adjust 499 * appropriately. 500 */ 501 if (vma->vm->mm.color_adjust == NULL) 502 return true; 503 504 /* Only valid to be called on an already inserted vma */ 505 GEM_BUG_ON(!drm_mm_node_allocated(node)); 506 GEM_BUG_ON(list_empty(&node->node_list)); 507 508 other = list_prev_entry(node, node_list); 509 if (color_differs(other, cache_level) && !drm_mm_hole_follows(other)) 510 return false; 511 512 other = list_next_entry(node, node_list); 513 if (color_differs(other, cache_level) && !drm_mm_hole_follows(node)) 514 return false; 515 516 return true; 517 } 518 519 static void assert_bind_count(const struct drm_i915_gem_object *obj) 520 { 521 /* 522 * Combine the assertion that the object is bound and that we have 523 * pinned its pages. But we should never have bound the object 524 * more than we have pinned its pages. (For complete accuracy, we 525 * assume that no else is pinning the pages, but as a rough assertion 526 * that we will not run into problems later, this will do!) 527 */ 528 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); 529 } 530 531 /** 532 * i915_vma_insert - finds a slot for the vma in its address space 533 * @vma: the vma 534 * @size: requested size in bytes (can be larger than the VMA) 535 * @alignment: required alignment 536 * @flags: mask of PIN_* flags to use 537 * 538 * First we try to allocate some free space that meets the requirements for 539 * the VMA. Failiing that, if the flags permit, it will evict an old VMA, 540 * preferrably the oldest idle entry to make room for the new VMA. 541 * 542 * Returns: 543 * 0 on success, negative error code otherwise. 544 */ 545 static int 546 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) 547 { 548 struct drm_i915_private *dev_priv = vma->vm->i915; 549 unsigned int cache_level; 550 u64 start, end; 551 int ret; 552 553 GEM_BUG_ON(i915_vma_is_closed(vma)); 554 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); 555 GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); 556 557 size = max(size, vma->size); 558 alignment = max(alignment, vma->display_alignment); 559 if (flags & PIN_MAPPABLE) { 560 size = max_t(typeof(size), size, vma->fence_size); 561 alignment = max_t(typeof(alignment), 562 alignment, vma->fence_alignment); 563 } 564 565 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); 566 GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT)); 567 GEM_BUG_ON(!is_power_of_2(alignment)); 568 569 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; 570 GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); 571 572 end = vma->vm->total; 573 if (flags & PIN_MAPPABLE) 574 end = min_t(u64, end, dev_priv->ggtt.mappable_end); 575 if (flags & PIN_ZONE_4G) 576 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE); 577 GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE)); 578 579 /* If binding the object/GGTT view requires more space than the entire 580 * aperture has, reject it early before evicting everything in a vain 581 * attempt to find space. 582 */ 583 if (size > end) { 584 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n", 585 size, flags & PIN_MAPPABLE ? "mappable" : "total", 586 end); 587 return -ENOSPC; 588 } 589 590 if (vma->obj) { 591 ret = i915_gem_object_pin_pages(vma->obj); 592 if (ret) 593 return ret; 594 595 cache_level = vma->obj->cache_level; 596 } else { 597 cache_level = 0; 598 } 599 600 GEM_BUG_ON(vma->pages); 601 602 ret = vma->ops->set_pages(vma); 603 if (ret) 604 goto err_unpin; 605 606 if (flags & PIN_OFFSET_FIXED) { 607 u64 offset = flags & PIN_OFFSET_MASK; 608 if (!IS_ALIGNED(offset, alignment) || 609 range_overflows(offset, size, end)) { 610 ret = -EINVAL; 611 goto err_clear; 612 } 613 614 ret = i915_gem_gtt_reserve(vma->vm, &vma->node, 615 size, offset, cache_level, 616 flags); 617 if (ret) 618 goto err_clear; 619 } else { 620 /* 621 * We only support huge gtt pages through the 48b PPGTT, 622 * however we also don't want to force any alignment for 623 * objects which need to be tightly packed into the low 32bits. 624 * 625 * Note that we assume that GGTT are limited to 4GiB for the 626 * forseeable future. See also i915_ggtt_offset(). 627 */ 628 if (upper_32_bits(end - 1) && 629 vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { 630 /* 631 * We can't mix 64K and 4K PTEs in the same page-table 632 * (2M block), and so to avoid the ugliness and 633 * complexity of coloring we opt for just aligning 64K 634 * objects to 2M. 635 */ 636 u64 page_alignment = 637 rounddown_pow_of_two(vma->page_sizes.sg | 638 I915_GTT_PAGE_SIZE_2M); 639 640 /* 641 * Check we don't expand for the limited Global GTT 642 * (mappable aperture is even more precious!). This 643 * also checks that we exclude the aliasing-ppgtt. 644 */ 645 GEM_BUG_ON(i915_vma_is_ggtt(vma)); 646 647 alignment = max(alignment, page_alignment); 648 649 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) 650 size = round_up(size, I915_GTT_PAGE_SIZE_2M); 651 } 652 653 ret = i915_gem_gtt_insert(vma->vm, &vma->node, 654 size, alignment, cache_level, 655 start, end, flags); 656 if (ret) 657 goto err_clear; 658 659 GEM_BUG_ON(vma->node.start < start); 660 GEM_BUG_ON(vma->node.start + vma->node.size > end); 661 } 662 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 663 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level)); 664 665 mutex_lock(&vma->vm->mutex); 666 list_move_tail(&vma->vm_link, &vma->vm->bound_list); 667 mutex_unlock(&vma->vm->mutex); 668 669 if (vma->obj) { 670 struct drm_i915_gem_object *obj = vma->obj; 671 672 spin_lock(&dev_priv->mm.obj_lock); 673 list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list); 674 obj->bind_count++; 675 spin_unlock(&dev_priv->mm.obj_lock); 676 677 assert_bind_count(obj); 678 } 679 680 return 0; 681 682 err_clear: 683 vma->ops->clear_pages(vma); 684 err_unpin: 685 if (vma->obj) 686 i915_gem_object_unpin_pages(vma->obj); 687 return ret; 688 } 689 690 static void 691 i915_vma_remove(struct i915_vma *vma) 692 { 693 struct drm_i915_private *i915 = vma->vm->i915; 694 695 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 696 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); 697 698 vma->ops->clear_pages(vma); 699 700 mutex_lock(&vma->vm->mutex); 701 drm_mm_remove_node(&vma->node); 702 list_move_tail(&vma->vm_link, &vma->vm->unbound_list); 703 mutex_unlock(&vma->vm->mutex); 704 705 /* 706 * Since the unbound list is global, only move to that list if 707 * no more VMAs exist. 708 */ 709 if (vma->obj) { 710 struct drm_i915_gem_object *obj = vma->obj; 711 712 spin_lock(&i915->mm.obj_lock); 713 if (--obj->bind_count == 0) 714 list_move_tail(&obj->mm.link, &i915->mm.unbound_list); 715 spin_unlock(&i915->mm.obj_lock); 716 717 /* 718 * And finally now the object is completely decoupled from this 719 * vma, we can drop its hold on the backing storage and allow 720 * it to be reaped by the shrinker. 721 */ 722 i915_gem_object_unpin_pages(obj); 723 assert_bind_count(obj); 724 } 725 } 726 727 int __i915_vma_do_pin(struct i915_vma *vma, 728 u64 size, u64 alignment, u64 flags) 729 { 730 const unsigned int bound = vma->flags; 731 int ret; 732 733 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 734 GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0); 735 GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma)); 736 737 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { 738 ret = -EBUSY; 739 goto err_unpin; 740 } 741 742 if ((bound & I915_VMA_BIND_MASK) == 0) { 743 ret = i915_vma_insert(vma, size, alignment, flags); 744 if (ret) 745 goto err_unpin; 746 } 747 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 748 749 ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags); 750 if (ret) 751 goto err_remove; 752 753 GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0); 754 755 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) 756 __i915_vma_set_map_and_fenceable(vma); 757 758 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); 759 return 0; 760 761 err_remove: 762 if ((bound & I915_VMA_BIND_MASK) == 0) { 763 i915_vma_remove(vma); 764 GEM_BUG_ON(vma->pages); 765 GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK); 766 } 767 err_unpin: 768 __i915_vma_unpin(vma); 769 return ret; 770 } 771 772 void i915_vma_close(struct i915_vma *vma) 773 { 774 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 775 776 GEM_BUG_ON(i915_vma_is_closed(vma)); 777 vma->flags |= I915_VMA_CLOSED; 778 779 /* 780 * We defer actually closing, unbinding and destroying the VMA until 781 * the next idle point, or if the object is freed in the meantime. By 782 * postponing the unbind, we allow for it to be resurrected by the 783 * client, avoiding the work required to rebind the VMA. This is 784 * advantageous for DRI, where the client/server pass objects 785 * between themselves, temporarily opening a local VMA to the 786 * object, and then closing it again. The same object is then reused 787 * on the next frame (or two, depending on the depth of the swap queue) 788 * causing us to rebind the VMA once more. This ends up being a lot 789 * of wasted work for the steady state. 790 */ 791 list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma); 792 } 793 794 void i915_vma_reopen(struct i915_vma *vma) 795 { 796 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 797 798 if (vma->flags & I915_VMA_CLOSED) { 799 vma->flags &= ~I915_VMA_CLOSED; 800 list_del(&vma->closed_link); 801 } 802 } 803 804 static void __i915_vma_destroy(struct i915_vma *vma) 805 { 806 struct drm_i915_private *i915 = vma->vm->i915; 807 808 GEM_BUG_ON(vma->node.allocated); 809 GEM_BUG_ON(vma->fence); 810 811 GEM_BUG_ON(i915_active_request_isset(&vma->last_fence)); 812 813 mutex_lock(&vma->vm->mutex); 814 list_del(&vma->vm_link); 815 mutex_unlock(&vma->vm->mutex); 816 817 if (vma->obj) { 818 struct drm_i915_gem_object *obj = vma->obj; 819 820 spin_lock(&obj->vma.lock); 821 list_del(&vma->obj_link); 822 rb_erase(&vma->obj_node, &vma->obj->vma.tree); 823 spin_unlock(&obj->vma.lock); 824 } 825 826 i915_active_fini(&vma->active); 827 828 kmem_cache_free(i915->vmas, vma); 829 } 830 831 void i915_vma_destroy(struct i915_vma *vma) 832 { 833 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 834 835 GEM_BUG_ON(i915_vma_is_active(vma)); 836 GEM_BUG_ON(i915_vma_is_pinned(vma)); 837 838 if (i915_vma_is_closed(vma)) 839 list_del(&vma->closed_link); 840 841 WARN_ON(i915_vma_unbind(vma)); 842 __i915_vma_destroy(vma); 843 } 844 845 void i915_vma_parked(struct drm_i915_private *i915) 846 { 847 struct i915_vma *vma, *next; 848 849 list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) { 850 GEM_BUG_ON(!i915_vma_is_closed(vma)); 851 i915_vma_destroy(vma); 852 } 853 854 GEM_BUG_ON(!list_empty(&i915->gt.closed_vma)); 855 } 856 857 static void __i915_vma_iounmap(struct i915_vma *vma) 858 { 859 GEM_BUG_ON(i915_vma_is_pinned(vma)); 860 861 if (vma->iomap == NULL) 862 return; 863 864 io_mapping_unmap(vma->iomap); 865 vma->iomap = NULL; 866 } 867 868 void i915_vma_revoke_mmap(struct i915_vma *vma) 869 { 870 struct drm_vma_offset_node *node = &vma->obj->base.vma_node; 871 u64 vma_offset; 872 873 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 874 875 if (!i915_vma_has_userfault(vma)) 876 return; 877 878 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); 879 GEM_BUG_ON(!vma->obj->userfault_count); 880 881 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; 882 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping, 883 drm_vma_node_offset_addr(node) + vma_offset, 884 vma->size, 885 1); 886 887 i915_vma_unset_userfault(vma); 888 if (!--vma->obj->userfault_count) 889 list_del(&vma->obj->userfault_link); 890 } 891 892 static void export_fence(struct i915_vma *vma, 893 struct i915_request *rq, 894 unsigned int flags) 895 { 896 struct reservation_object *resv = vma->resv; 897 898 /* 899 * Ignore errors from failing to allocate the new fence, we can't 900 * handle an error right now. Worst case should be missed 901 * synchronisation leading to rendering corruption. 902 */ 903 reservation_object_lock(resv, NULL); 904 if (flags & EXEC_OBJECT_WRITE) 905 reservation_object_add_excl_fence(resv, &rq->fence); 906 else if (reservation_object_reserve_shared(resv, 1) == 0) 907 reservation_object_add_shared_fence(resv, &rq->fence); 908 reservation_object_unlock(resv); 909 } 910 911 int i915_vma_move_to_active(struct i915_vma *vma, 912 struct i915_request *rq, 913 unsigned int flags) 914 { 915 struct drm_i915_gem_object *obj = vma->obj; 916 917 lockdep_assert_held(&rq->i915->drm.struct_mutex); 918 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 919 920 /* 921 * Add a reference if we're newly entering the active list. 922 * The order in which we add operations to the retirement queue is 923 * vital here: mark_active adds to the start of the callback list, 924 * such that subsequent callbacks are called first. Therefore we 925 * add the active reference first and queue for it to be dropped 926 * *last*. 927 */ 928 if (!vma->active.count) 929 obj->active_count++; 930 931 if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) { 932 if (!vma->active.count) 933 obj->active_count--; 934 return -ENOMEM; 935 } 936 937 GEM_BUG_ON(!i915_vma_is_active(vma)); 938 GEM_BUG_ON(!obj->active_count); 939 940 obj->write_domain = 0; 941 if (flags & EXEC_OBJECT_WRITE) { 942 obj->write_domain = I915_GEM_DOMAIN_RENDER; 943 944 if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) 945 __i915_active_request_set(&obj->frontbuffer_write, rq); 946 947 obj->read_domains = 0; 948 } 949 obj->read_domains |= I915_GEM_GPU_DOMAINS; 950 951 if (flags & EXEC_OBJECT_NEEDS_FENCE) 952 __i915_active_request_set(&vma->last_fence, rq); 953 954 export_fence(vma, rq, flags); 955 return 0; 956 } 957 958 int i915_vma_unbind(struct i915_vma *vma) 959 { 960 int ret; 961 962 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 963 964 /* 965 * First wait upon any activity as retiring the request may 966 * have side-effects such as unpinning or even unbinding this vma. 967 */ 968 might_sleep(); 969 if (i915_vma_is_active(vma)) { 970 /* 971 * When a closed VMA is retired, it is unbound - eek. 972 * In order to prevent it from being recursively closed, 973 * take a pin on the vma so that the second unbind is 974 * aborted. 975 * 976 * Even more scary is that the retire callback may free 977 * the object (last active vma). To prevent the explosion 978 * we defer the actual object free to a worker that can 979 * only proceed once it acquires the struct_mutex (which 980 * we currently hold, therefore it cannot free this object 981 * before we are finished). 982 */ 983 __i915_vma_pin(vma); 984 985 ret = i915_active_wait(&vma->active); 986 if (ret) 987 goto unpin; 988 989 ret = i915_active_request_retire(&vma->last_fence, 990 &vma->vm->i915->drm.struct_mutex); 991 unpin: 992 __i915_vma_unpin(vma); 993 if (ret) 994 return ret; 995 } 996 GEM_BUG_ON(i915_vma_is_active(vma)); 997 998 if (i915_vma_is_pinned(vma)) { 999 vma_print_allocator(vma, "is pinned"); 1000 return -EBUSY; 1001 } 1002 1003 if (!drm_mm_node_allocated(&vma->node)) 1004 return 0; 1005 1006 if (i915_vma_is_map_and_fenceable(vma)) { 1007 /* 1008 * Check that we have flushed all writes through the GGTT 1009 * before the unbind, other due to non-strict nature of those 1010 * indirect writes they may end up referencing the GGTT PTE 1011 * after the unbind. 1012 */ 1013 i915_vma_flush_writes(vma); 1014 GEM_BUG_ON(i915_vma_has_ggtt_write(vma)); 1015 1016 /* release the fence reg _after_ flushing */ 1017 ret = i915_vma_put_fence(vma); 1018 if (ret) 1019 return ret; 1020 1021 /* Force a pagefault for domain tracking on next user access */ 1022 i915_vma_revoke_mmap(vma); 1023 1024 __i915_vma_iounmap(vma); 1025 vma->flags &= ~I915_VMA_CAN_FENCE; 1026 } 1027 GEM_BUG_ON(vma->fence); 1028 GEM_BUG_ON(i915_vma_has_userfault(vma)); 1029 1030 if (likely(!vma->vm->closed)) { 1031 trace_i915_vma_unbind(vma); 1032 vma->ops->unbind_vma(vma); 1033 } 1034 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); 1035 1036 i915_vma_remove(vma); 1037 1038 return 0; 1039 } 1040 1041 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 1042 #include "selftests/i915_vma.c" 1043 #endif 1044