/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
        unsigned long entries[12];
        struct stack_trace trace = {
                .entries = entries,
                .max_entries = ARRAY_SIZE(entries),
        };
        char buf[512];

        if (!vma->node.stack) {
                DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
                                 vma->node.start, vma->node.size, reason);
                return;
        }

        depot_fetch_stack(vma->node.stack, &trace);
        snprint_stack_trace(buf, sizeof(buf), &trace, 0);
        DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
                         vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

struct i915_vma_active {
        struct i915_gem_active base;
        struct i915_vma *vma;
        struct rb_node node;
        u64 timeline;
};

static void
__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
{
        struct drm_i915_gem_object *obj = vma->obj;

        GEM_BUG_ON(!i915_vma_is_active(vma));
        if (--vma->active_count)
                return;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

        GEM_BUG_ON(!i915_gem_object_is_active(obj));
        if (--obj->active_count)
                return;

        /* Prune the shared fence arrays iff completely idle (inc. external) */
        if (reservation_object_trylock(obj->resv)) {
                if (reservation_object_test_signaled_rcu(obj->resv, true))
                        reservation_object_add_excl_fence(obj->resv, NULL);
                reservation_object_unlock(obj->resv);
        }

        /* Bump our place on the bound list to keep it roughly in LRU order
         * so that we don't steal from recently used but inactive objects
         * (unless we are forced to ofc!)
         */
        spin_lock(&rq->i915->mm.obj_lock);
        if (obj->bind_count)
                list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
        spin_unlock(&rq->i915->mm.obj_lock);

        obj->mm.dirty = true; /* be paranoid */

        if (i915_gem_object_has_active_reference(obj)) {
                i915_gem_object_clear_active_reference(obj);
                i915_gem_object_put(obj);
        }
}

static void
i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq)
{
        struct i915_vma_active *active =
                container_of(base, typeof(*active), base);

        __i915_vma_retire(active->vma, rq);
}

static void
i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq)
{
        __i915_vma_retire(container_of(base, struct i915_vma, last_active), rq);
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;
        struct rb_node *rb, **p;

        /* The aliasing_ppgtt should never be used directly! */
        GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);

        vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);

        vma->active = RB_ROOT;

        init_request_active(&vma->last_active, i915_vma_last_retire);
        init_request_active(&vma->last_fence, NULL);
        vma->vm = vm;
        vma->ops = &vm->vma_ops;
        vma->obj = obj;
        vma->resv = obj->resv;
        vma->size = obj->base.size;
        vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

        if (view && view->type != I915_GGTT_VIEW_NORMAL) {
                vma->ggtt_view = *view;
                if (view->type == I915_GGTT_VIEW_PARTIAL) {
                        GEM_BUG_ON(range_overflows_t(u64,
                                                     view->partial.offset,
                                                     view->partial.size,
                                                     obj->base.size >> PAGE_SHIFT));
                        vma->size = view->partial.size;
                        vma->size <<= PAGE_SHIFT;
                        GEM_BUG_ON(vma->size > obj->base.size);
                } else if (view->type == I915_GGTT_VIEW_ROTATED) {
                        vma->size = intel_rotation_info_size(&view->rotated);
                        vma->size <<= PAGE_SHIFT;
                }
        }

        if (unlikely(vma->size > vm->total))
                goto err_vma;

        GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

        if (i915_is_ggtt(vm)) {
                if (unlikely(overflows_type(vma->size, u32)))
                        goto err_vma;

                vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
                                                      i915_gem_object_get_tiling(obj),
                                                      i915_gem_object_get_stride(obj));
                if (unlikely(vma->fence_size < vma->size || /* overflow */
                             vma->fence_size > vm->total))
                        goto err_vma;

                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

                vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
                                                                i915_gem_object_get_tiling(obj),
                                                                i915_gem_object_get_stride(obj));
                GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

                /*
                 * We put the GGTT vma at the start of the vma-list, followed
                 * by the ppGTT vma. This allows us to break early when
                 * iterating over only the GGTT vma for an object, see
                 * for_each_ggtt_vma().
                 */
                vma->flags |= I915_VMA_GGTT;
                list_add(&vma->obj_link, &obj->vma_list);
        } else {
                list_add_tail(&vma->obj_link, &obj->vma_list);
        }

        rb = NULL;
        p = &obj->vma_tree.rb_node;
        while (*p) {
                struct i915_vma *pos;

                rb = *p;
                pos = rb_entry(rb, struct i915_vma, obj_node);
                if (i915_vma_compare(pos, vm, view) < 0)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&vma->obj_node, rb, p);
        rb_insert_color(&vma->obj_node, &obj->vma_tree);
        list_add(&vma->vm_link, &vm->unbound_list);

        return vma;

err_vma:
        kmem_cache_free(vm->i915->vmas, vma);
        return ERR_PTR(-E2BIG);
}
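
/*
 * Illustrative sketch only: the GGTT-before-ppGTT ordering that vma_create()
 * establishes on obj->vma_list is what lets for_each_ggtt_vma() (defined in
 * i915_vma.h) stop at the first non-GGTT vma, roughly:
 *
 *        struct i915_vma *v;
 *
 *        list_for_each_entry(v, &obj->vma_list, obj_link) {
 *                if (!i915_vma_is_ggtt(v))
 *                        break;        <- ppGTT vmas are all at the tail
 *                ...
 *        }
 */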

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct rb_node *rb;

        rb = obj->vma_tree.rb_node;
        while (rb) {
                struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
                long cmp;

                cmp = i915_vma_compare(vma, vm, view);
                if (cmp == 0)
                        return vma;

                if (cmp < 0)
                        rb = rb->rb_right;
                else
                        rb = rb->rb_left;
        }

        return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
                  struct i915_address_space *vm,
                  const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;

        lockdep_assert_held(&obj->base.dev->struct_mutex);
        GEM_BUG_ON(view && !i915_is_ggtt(vm));
        GEM_BUG_ON(vm->closed);

        vma = vma_lookup(obj, vm, view);
        if (!vma)
                vma = vma_create(obj, vm, view);

        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
        GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
        return vma;
}
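
/*
 * Usage sketch (caller-side names are illustrative): because the VMA is a
 * singleton per (obj, vm, view) tuple, repeated lookups return the same
 * pointer until the object is freed or the address space is closed:
 *
 *        struct i915_vma *a = i915_vma_instance(obj, vm, NULL);
 *        struct i915_vma *b = i915_vma_instance(obj, vm, NULL);
 *
 *        if (!IS_ERR(a))
 *                GEM_BUG_ON(a != b);
 */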

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags)
{
        u32 bind_flags;
        u32 vma_flags;
        int ret;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->size > vma->node.size);

        if (GEM_WARN_ON(range_overflows(vma->node.start,
                                        vma->node.size,
                                        vma->vm->total)))
                return -ENODEV;

        if (GEM_WARN_ON(!flags))
                return -EINVAL;

        bind_flags = 0;
        if (flags & PIN_GLOBAL)
                bind_flags |= I915_VMA_GLOBAL_BIND;
        if (flags & PIN_USER)
                bind_flags |= I915_VMA_LOCAL_BIND;

        vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
        if (flags & PIN_UPDATE)
                bind_flags |= vma_flags;
        else
                bind_flags &= ~vma_flags;
        if (bind_flags == 0)
                return 0;

        GEM_BUG_ON(!vma->pages);

        trace_i915_vma_bind(vma, bind_flags);
        ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;

        vma->flags |= bind_flags;
        return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
        void __iomem *ptr;
        int err;

        /* Access through the GTT requires the device to be awake. */
        assert_rpm_wakelock_held(vma->vm->i915);

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
        if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
                err = -ENODEV;
                goto err;
        }

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

        ptr = vma->iomap;
        if (ptr == NULL) {
                ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
                                        vma->node.start,
                                        vma->node.size);
                if (ptr == NULL) {
                        err = -ENOMEM;
                        goto err;
                }

                vma->iomap = ptr;
        }

        __i915_vma_pin(vma);

        err = i915_vma_pin_fence(vma);
        if (err)
                goto err_unpin;

        i915_vma_set_ggtt_write(vma);
        return ptr;

err_unpin:
        __i915_vma_unpin(vma);
err:
        return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
        if (!i915_vma_has_ggtt_write(vma))
                return;

        i915_gem_flush_ggtt_writes(vma->vm->i915);

        i915_vma_unset_ggtt_write(vma);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        GEM_BUG_ON(vma->iomap == NULL);

        i915_vma_flush_writes(vma);

        i915_vma_unpin_fence(vma);
        i915_vma_unpin(vma);
}
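
/*
 * Typical iomap usage, as a sketch (caller-side names are illustrative; the
 * caller holds struct_mutex and a runtime-pm wakeref):
 *
 *        void __iomem *ptr;
 *
 *        ptr = i915_vma_pin_iomap(vma);
 *        if (IS_ERR(ptr))
 *                return PTR_ERR(ptr);
 *
 *        writel(value, ptr + offset);
 *        ...
 *        i915_vma_unpin_iomap(vma);
 *
 * Note that i915_vma_pin_iomap() leaves the vma pinned and caches the WC
 * mapping in vma->iomap; the mapping itself is only torn down on unbind.
 */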

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
        struct i915_vma *vma;
        struct drm_i915_gem_object *obj;

        vma = fetch_and_zero(p_vma);
        if (!vma)
                return;

        obj = vma->obj;
        GEM_BUG_ON(!obj);

        i915_vma_unpin(vma);
        i915_vma_close(vma);

        if (flags & I915_VMA_RELEASE_MAP)
                i915_gem_object_unpin_map(obj);

        __i915_gem_object_release_unless_active(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
                        u64 size, u64 alignment, u64 flags)
{
        if (!drm_mm_node_allocated(&vma->node))
                return false;

        if (vma->node.size < size)
                return true;

        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
        if (alignment && !IS_ALIGNED(vma->node.start, alignment))
                return true;

        if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
                return true;

        if (flags & PIN_OFFSET_BIAS &&
            vma->node.start < (flags & PIN_OFFSET_MASK))
                return true;

        if (flags & PIN_OFFSET_FIXED &&
            vma->node.start != (flags & PIN_OFFSET_MASK))
                return true;

        return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
        bool mappable, fenceable;

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!vma->fence_size);

        /*
         * Explicitly disable for rotated VMA since the display does not
         * need the fence and the VMA is not accessible to other users.
         */
        if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
                return;

        fenceable = (vma->node.size >= vma->fence_size &&
                     IS_ALIGNED(vma->node.start, vma->fence_alignment));

        mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

        if (mappable && fenceable)
                vma->flags |= I915_VMA_CAN_FENCE;
        else
                vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
        return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
        struct drm_mm_node *node = &vma->node;
        struct drm_mm_node *other;

        /*
         * On some machines we have to be careful when putting differing types
         * of snoopable memory together to avoid the prefetcher crossing memory
         * domains and dying. During vm initialisation, we decide whether or not
         * these constraints apply and set the drm_mm.color_adjust
         * appropriately.
         */
        if (vma->vm->mm.color_adjust == NULL)
                return true;

        /* Only valid to be called on an already inserted vma */
        GEM_BUG_ON(!drm_mm_node_allocated(node));
        GEM_BUG_ON(list_empty(&node->node_list));

        other = list_prev_entry(node, node_list);
        if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
                return false;

        other = list_next_entry(node, node_list);
        if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
                return false;

        return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
        /*
         * Combine the assertion that the object is bound and that we have
         * pinned its pages. But we should never have bound the object
         * more than we have pinned its pages. (For complete accuracy, we
         * assume that no one else is pinning the pages, but as a rough
         * assertion that we will not run into problems later, this will do!)
         */
        GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}
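
/*
 * Note on flag encoding (sketch): a page-aligned offset or bias shares the
 * u64 with the PIN_* bits, so a caller might pass, for example,
 *
 *        flags = PIN_GLOBAL | PIN_OFFSET_FIXED | (offset & PIN_OFFSET_MASK);
 *
 * Both i915_vma_misplaced() above and i915_vma_insert() below recover the
 * offset with (flags & PIN_OFFSET_MASK).
 */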

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
        struct drm_i915_private *dev_priv = vma->vm->i915;
        unsigned int cache_level;
        u64 start, end;
        int ret;

        GEM_BUG_ON(i915_vma_is_closed(vma));
        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

        size = max(size, vma->size);
        alignment = max(alignment, vma->display_alignment);
        if (flags & PIN_MAPPABLE) {
                size = max_t(typeof(size), size, vma->fence_size);
                alignment = max_t(typeof(alignment),
                                  alignment, vma->fence_alignment);
        }

        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(!is_power_of_2(alignment));

        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

        end = vma->vm->total;
        if (flags & PIN_MAPPABLE)
                end = min_t(u64, end, dev_priv->ggtt.mappable_end);
        if (flags & PIN_ZONE_4G)
                end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
        GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

        /* If binding the object/GGTT view requires more space than the entire
         * aperture has, reject it early before evicting everything in a vain
         * attempt to find space.
         */
        if (size > end) {
                DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
                          size, flags & PIN_MAPPABLE ? "mappable" : "total",
                          end);
                return -ENOSPC;
        }

        if (vma->obj) {
                ret = i915_gem_object_pin_pages(vma->obj);
                if (ret)
                        return ret;

                cache_level = vma->obj->cache_level;
        } else {
                cache_level = 0;
        }

        GEM_BUG_ON(vma->pages);

        ret = vma->ops->set_pages(vma);
        if (ret)
                goto err_unpin;

        if (flags & PIN_OFFSET_FIXED) {
                u64 offset = flags & PIN_OFFSET_MASK;

                if (!IS_ALIGNED(offset, alignment) ||
                    range_overflows(offset, size, end)) {
                        ret = -EINVAL;
                        goto err_clear;
                }

                ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
                                           size, offset, cache_level,
                                           flags);
                if (ret)
                        goto err_clear;
        } else {
                /*
                 * We only support huge gtt pages through the 48b PPGTT,
                 * however we also don't want to force any alignment for
                 * objects which need to be tightly packed into the low 32bits.
                 *
                 * Note that we assume that the GGTT is limited to 4GiB for the
                 * foreseeable future. See also i915_ggtt_offset().
                 */
                if (upper_32_bits(end - 1) &&
                    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        /*
                         * We can't mix 64K and 4K PTEs in the same page-table
                         * (2M block), and so to avoid the ugliness and
                         * complexity of coloring we opt for just aligning 64K
                         * objects to 2M.
                         */
                        u64 page_alignment =
                                rounddown_pow_of_two(vma->page_sizes.sg |
                                                     I915_GTT_PAGE_SIZE_2M);
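
                        /*
                         * Worked example (illustrative): for an object backed
                         * by a mix of 64K and 4K pages, page_sizes.sg ==
                         * I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K, and
                         * rounddown_pow_of_two() of that value OR'ed with
                         * I915_GTT_PAGE_SIZE_2M picks out the 2M bit, i.e.
                         * page_alignment == I915_GTT_PAGE_SIZE_2M.
                         */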

                        /*
                         * Check we don't expand for the limited Global GTT
                         * (mappable aperture is even more precious!). This
                         * also checks that we exclude the aliasing-ppgtt.
                         */
                        GEM_BUG_ON(i915_vma_is_ggtt(vma));

                        alignment = max(alignment, page_alignment);

                        if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
                                size = round_up(size, I915_GTT_PAGE_SIZE_2M);
                }

                ret = i915_gem_gtt_insert(vma->vm, &vma->node,
                                          size, alignment, cache_level,
                                          start, end, flags);
                if (ret)
                        goto err_clear;

                GEM_BUG_ON(vma->node.start < start);
                GEM_BUG_ON(vma->node.start + vma->node.size > end);
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));

        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

        if (vma->obj) {
                struct drm_i915_gem_object *obj = vma->obj;

                spin_lock(&dev_priv->mm.obj_lock);
                list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
                obj->bind_count++;
                spin_unlock(&dev_priv->mm.obj_lock);

                assert_bind_count(obj);
        }

        return 0;

err_clear:
        vma->ops->clear_pages(vma);
err_unpin:
        if (vma->obj)
                i915_gem_object_unpin_pages(vma->obj);
        return ret;
}

static void
i915_vma_remove(struct i915_vma *vma)
{
        struct drm_i915_private *i915 = vma->vm->i915;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

        vma->ops->clear_pages(vma);

        drm_mm_remove_node(&vma->node);
        list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

        /*
         * Since the unbound list is global, only move to that list if
         * no more VMAs exist.
         */
        if (vma->obj) {
                struct drm_i915_gem_object *obj = vma->obj;

                spin_lock(&i915->mm.obj_lock);
                if (--obj->bind_count == 0)
                        list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
                spin_unlock(&i915->mm.obj_lock);

                /*
                 * And finally now the object is completely decoupled from this
                 * vma, we can drop its hold on the backing storage and allow
                 * it to be reaped by the shrinker.
                 */
                i915_gem_object_unpin_pages(obj);
                assert_bind_count(obj);
        }
}

int __i915_vma_do_pin(struct i915_vma *vma,
                      u64 size, u64 alignment, u64 flags)
{
        const unsigned int bound = vma->flags;
        int ret;

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
        GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
        GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

        if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
                ret = -EBUSY;
                goto err_unpin;
        }

        if ((bound & I915_VMA_BIND_MASK) == 0) {
                ret = i915_vma_insert(vma, size, alignment, flags);
                if (ret)
                        goto err_unpin;
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

        ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
        if (ret)
                goto err_remove;

        GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

        if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
                __i915_vma_set_map_and_fenceable(vma);

        GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
        return 0;

err_remove:
        if ((bound & I915_VMA_BIND_MASK) == 0) {
                i915_vma_remove(vma);
                GEM_BUG_ON(vma->pages);
                GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
        }
err_unpin:
        __i915_vma_unpin(vma);
        return ret;
}
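
/*
 * Caller-side sketch, assuming the usual entry point is the i915_vma_pin()
 * inline in i915_vma.h, which fast-paths an already-bound vma and otherwise
 * lands in __i915_vma_do_pin() above (names are illustrative):
 *
 *        vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *        if (IS_ERR(vma))
 *                return PTR_ERR(vma);
 *
 *        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
 *        if (err)
 *                return err;
 *        ...
 *        i915_vma_unpin(vma);
 */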

void i915_vma_close(struct i915_vma *vma)
{
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        GEM_BUG_ON(i915_vma_is_closed(vma));
        vma->flags |= I915_VMA_CLOSED;

        /*
         * We defer actually closing, unbinding and destroying the VMA until
         * the next idle point, or if the object is freed in the meantime. By
         * postponing the unbind, we allow for it to be resurrected by the
         * client, avoiding the work required to rebind the VMA. This is
         * advantageous for DRI, where the client/server pass objects
         * between themselves, temporarily opening a local VMA to the
         * object, and then closing it again. The same object is then reused
         * on the next frame (or two, depending on the depth of the swap queue)
         * causing us to rebind the VMA once more. This ends up being a lot
         * of wasted work for the steady state.
         */
        list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
}

void i915_vma_reopen(struct i915_vma *vma)
{
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        if (vma->flags & I915_VMA_CLOSED) {
                vma->flags &= ~I915_VMA_CLOSED;
                list_del(&vma->closed_link);
        }
}

static void __i915_vma_destroy(struct i915_vma *vma)
{
        struct drm_i915_private *i915 = vma->vm->i915;
        struct i915_vma_active *iter, *n;

        GEM_BUG_ON(vma->node.allocated);
        GEM_BUG_ON(vma->fence);

        GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));

        list_del(&vma->obj_link);
        list_del(&vma->vm_link);
        if (vma->obj)
                rb_erase(&vma->obj_node, &vma->obj->vma_tree);

        rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
                GEM_BUG_ON(i915_gem_active_isset(&iter->base));
                kfree(iter);
        }

        kmem_cache_free(i915->vmas, vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        GEM_BUG_ON(i915_vma_is_active(vma));
        GEM_BUG_ON(i915_vma_is_pinned(vma));

        if (i915_vma_is_closed(vma))
                list_del(&vma->closed_link);

        WARN_ON(i915_vma_unbind(vma));
        __i915_vma_destroy(vma);
}

void i915_vma_parked(struct drm_i915_private *i915)
{
        struct i915_vma *vma, *next;

        list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
                GEM_BUG_ON(!i915_vma_is_closed(vma));
                i915_vma_destroy(vma);
        }

        GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
}
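
/*
 * Lifecycle of a closed vma, in brief (a sketch of the functions above):
 *
 *        i915_vma_close(vma);    -> flagged I915_VMA_CLOSED and queued on
 *                                   i915->gt.closed_vma, but kept bound
 *        i915_vma_reopen(vma);   -> rescued by a new user before we idle
 * or, once the GPU parks:
 *        i915_vma_parked(i915);  -> i915_vma_destroy() on every closed vma
 */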

static void __i915_vma_iounmap(struct i915_vma *vma)
{
        GEM_BUG_ON(i915_vma_is_pinned(vma));

        if (vma->iomap == NULL)
                return;

        io_mapping_unmap(vma->iomap);
        vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
        struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
        u64 vma_offset;

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        if (!i915_vma_has_userfault(vma))
                return;

        GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
        GEM_BUG_ON(!vma->obj->userfault_count);

        vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
        unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
                            drm_vma_node_offset_addr(node) + vma_offset,
                            vma->size,
                            1);

        i915_vma_unset_userfault(vma);
        if (!--vma->obj->userfault_count)
                list_del(&vma->obj->userfault_link);
}

static void export_fence(struct i915_vma *vma,
                         struct i915_request *rq,
                         unsigned int flags)
{
        struct reservation_object *resv = vma->resv;

        /*
         * Ignore errors from failing to allocate the new fence, we can't
         * handle an error right now. Worst case should be missed
         * synchronisation leading to rendering corruption.
         */
        reservation_object_lock(resv, NULL);
        if (flags & EXEC_OBJECT_WRITE)
                reservation_object_add_excl_fence(resv, &rq->fence);
        else if (reservation_object_reserve_shared(resv) == 0)
                reservation_object_add_shared_fence(resv, &rq->fence);
        reservation_object_unlock(resv);
}

static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx)
{
        struct i915_vma_active *active;
        struct rb_node **p, *parent;
        struct i915_request *old;

        /*
         * We track the most recently used timeline to skip a rbtree search
         * for the common case, under typical loads we never need the rbtree
         * at all. We can reuse the last_active slot if it is empty, that is
         * after the previous activity has been retired, or if the active
         * matches the current timeline.
         *
         * Note that we allow the timeline to be active simultaneously in
         * the rbtree and the last_active cache. We do this to avoid having
         * to search and replace the rbtree element for a new timeline, with
         * the cost being that we must be aware that the vma may be retired
         * twice for the same timeline (as the older rbtree element will be
         * retired before the new request added to last_active).
         */
        old = i915_gem_active_raw(&vma->last_active,
                                  &vma->vm->i915->drm.struct_mutex);
        if (!old || old->fence.context == idx)
                goto out;

        /* Move the currently active fence into the rbtree */
        idx = old->fence.context;

        parent = NULL;
        p = &vma->active.rb_node;
        while (*p) {
                parent = *p;

                active = rb_entry(parent, struct i915_vma_active, node);
                if (active->timeline == idx)
                        goto replace;

                if (active->timeline < idx)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        active = kmalloc(sizeof(*active), GFP_KERNEL);

        /* kmalloc may retire the vma->last_active request (thanks shrinker)! */
        if (unlikely(!i915_gem_active_raw(&vma->last_active,
                                          &vma->vm->i915->drm.struct_mutex))) {
                kfree(active);
                goto out;
        }

        if (unlikely(!active))
                return ERR_PTR(-ENOMEM);

        init_request_active(&active->base, i915_vma_retire);
        active->vma = vma;
        active->timeline = idx;

        rb_link_node(&active->node, parent, p);
        rb_insert_color(&active->node, &vma->active);

replace:
        /*
         * Overwrite the previous active slot in the rbtree with last_active,
         * leaving last_active zeroed. If the previous slot is still active,
         * we must be careful as we now only expect to receive one retire
         * callback not two, and so must undo the active counting for the
         * overwritten slot.
         */
        if (i915_gem_active_isset(&active->base)) {
                /* Retire ourselves from the old rq->active_list */
                __list_del_entry(&active->base.link);
                vma->active_count--;
                GEM_BUG_ON(!vma->active_count);
        }
        GEM_BUG_ON(list_empty(&vma->last_active.link));
        list_replace_init(&vma->last_active.link, &active->base.link);
        active->base.request = fetch_and_zero(&vma->last_active.request);

out:
        return &vma->last_active;
}
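
/*
 * Usage sketch: during request construction the caller marks each vma the
 * request will use, e.g.
 *
 *        err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *        if (err)
 *                return err;
 *
 * which keeps the vma (and its object) busy until @rq is retired, and
 * exports the request fence to the object's reservation object for third
 * parties to wait on.
 */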

int i915_vma_move_to_active(struct i915_vma *vma,
                            struct i915_request *rq,
                            unsigned int flags)
{
        struct drm_i915_gem_object *obj = vma->obj;
        struct i915_gem_active *active;

        lockdep_assert_held(&rq->i915->drm.struct_mutex);
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

        active = active_instance(vma, rq->fence.context);
        if (IS_ERR(active))
                return PTR_ERR(active);

        /*
         * Add a reference if we're newly entering the active list.
         * The order in which we add operations to the retirement queue is
         * vital here: mark_active adds to the start of the callback list,
         * such that subsequent callbacks are called first. Therefore we
         * add the active reference first and queue for it to be dropped
         * *last*.
         */
        if (!i915_gem_active_isset(active) && !vma->active_count++) {
                list_move_tail(&vma->vm_link, &vma->vm->active_list);
                obj->active_count++;
        }
        i915_gem_active_set(active, rq);
        GEM_BUG_ON(!i915_vma_is_active(vma));
        GEM_BUG_ON(!obj->active_count);

        obj->write_domain = 0;
        if (flags & EXEC_OBJECT_WRITE) {
                obj->write_domain = I915_GEM_DOMAIN_RENDER;

                if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
                        i915_gem_active_set(&obj->frontbuffer_write, rq);

                obj->read_domains = 0;
        }
        obj->read_domains |= I915_GEM_GPU_DOMAINS;

        if (flags & EXEC_OBJECT_NEEDS_FENCE)
                i915_gem_active_set(&vma->last_fence, rq);

        export_fence(vma, rq, flags);
        return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
        int ret;

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        /*
         * First wait upon any activity as retiring the request may
         * have side-effects such as unpinning or even unbinding this vma.
         */
        might_sleep();
        if (i915_vma_is_active(vma)) {
                struct i915_vma_active *active, *n;

                /*
                 * When a closed VMA is retired, it is unbound - eek.
                 * In order to prevent it from being recursively closed,
                 * take a pin on the vma so that the second unbind is
                 * aborted.
                 *
                 * Even more scary is that the retire callback may free
                 * the object (last active vma). To prevent the explosion
                 * we defer the actual object free to a worker that can
                 * only proceed once it acquires the struct_mutex (which
                 * we currently hold, therefore it cannot free this object
                 * before we are finished).
                 */
                __i915_vma_pin(vma);

                ret = i915_gem_active_retire(&vma->last_active,
                                             &vma->vm->i915->drm.struct_mutex);
                if (ret)
                        goto unpin;

                rbtree_postorder_for_each_entry_safe(active, n,
                                                     &vma->active, node) {
                        ret = i915_gem_active_retire(&active->base,
                                                     &vma->vm->i915->drm.struct_mutex);
                        if (ret)
                                goto unpin;
                }

                ret = i915_gem_active_retire(&vma->last_fence,
                                             &vma->vm->i915->drm.struct_mutex);
unpin:
                __i915_vma_unpin(vma);
                if (ret)
                        return ret;
        }
        GEM_BUG_ON(i915_vma_is_active(vma));

        if (i915_vma_is_pinned(vma)) {
                vma_print_allocator(vma, "is pinned");
                return -EBUSY;
        }

        if (!drm_mm_node_allocated(&vma->node))
                return 0;

        if (i915_vma_is_map_and_fenceable(vma)) {
                /*
                 * Check that we have flushed all writes through the GGTT
                 * before the unbind; otherwise, due to the non-strict nature
                 * of those indirect writes, they may end up referencing the
                 * GGTT PTE after the unbind.
                 */
                i915_vma_flush_writes(vma);
                GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

                /* release the fence reg _after_ flushing */
                ret = i915_vma_put_fence(vma);
                if (ret)
                        return ret;

                /* Force a pagefault for domain tracking on next user access */
                i915_vma_revoke_mmap(vma);

                __i915_vma_iounmap(vma);
                vma->flags &= ~I915_VMA_CAN_FENCE;
        }
        GEM_BUG_ON(vma->fence);
        GEM_BUG_ON(i915_vma_has_userfault(vma));

        if (likely(!vma->vm->closed)) {
                trace_i915_vma_unbind(vma);
                vma->ops->unbind_vma(vma);
        }
        vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

        i915_vma_remove(vma);

        return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif