1 /* 2 * Copyright © 2016 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 */ 24 25 #include "i915_vma.h" 26 27 #include "i915_drv.h" 28 #include "intel_ringbuffer.h" 29 #include "intel_frontbuffer.h" 30 31 #include <drm/drm_gem.h> 32 33 static void 34 i915_vma_retire(struct i915_gem_active *active, 35 struct drm_i915_gem_request *rq) 36 { 37 const unsigned int idx = rq->engine->id; 38 struct i915_vma *vma = 39 container_of(active, struct i915_vma, last_read[idx]); 40 struct drm_i915_gem_object *obj = vma->obj; 41 42 GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx)); 43 44 i915_vma_clear_active(vma, idx); 45 if (i915_vma_is_active(vma)) 46 return; 47 48 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 49 list_move_tail(&vma->vm_link, &vma->vm->inactive_list); 50 if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma))) 51 WARN_ON(i915_vma_unbind(vma)); 52 53 GEM_BUG_ON(!i915_gem_object_is_active(obj)); 54 if (--obj->active_count) 55 return; 56 57 /* Prune the shared fence arrays iff completely idle (inc. external) */ 58 if (reservation_object_trylock(obj->resv)) { 59 if (reservation_object_test_signaled_rcu(obj->resv, true)) 60 reservation_object_add_excl_fence(obj->resv, NULL); 61 reservation_object_unlock(obj->resv); 62 } 63 64 /* Bump our place on the bound list to keep it roughly in LRU order 65 * so that we don't steal from recently used but inactive objects 66 * (unless we are forced to ofc!) 67 */ 68 spin_lock(&rq->i915->mm.obj_lock); 69 if (obj->bind_count) 70 list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list); 71 spin_unlock(&rq->i915->mm.obj_lock); 72 73 obj->mm.dirty = true; /* be paranoid */ 74 75 if (i915_gem_object_has_active_reference(obj)) { 76 i915_gem_object_clear_active_reference(obj); 77 i915_gem_object_put(obj); 78 } 79 } 80 81 static struct i915_vma * 82 vma_create(struct drm_i915_gem_object *obj, 83 struct i915_address_space *vm, 84 const struct i915_ggtt_view *view) 85 { 86 struct i915_vma *vma; 87 struct rb_node *rb, **p; 88 int i; 89 90 /* The aliasing_ppgtt should never be used directly! */ 91 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base); 92 93 vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL); 94 if (vma == NULL) 95 return ERR_PTR(-ENOMEM); 96 97 for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) 98 init_request_active(&vma->last_read[i], i915_vma_retire); 99 init_request_active(&vma->last_fence, NULL); 100 vma->vm = vm; 101 vma->obj = obj; 102 vma->resv = obj->resv; 103 vma->size = obj->base.size; 104 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; 105 106 if (view && view->type != I915_GGTT_VIEW_NORMAL) { 107 vma->ggtt_view = *view; 108 if (view->type == I915_GGTT_VIEW_PARTIAL) { 109 GEM_BUG_ON(range_overflows_t(u64, 110 view->partial.offset, 111 view->partial.size, 112 obj->base.size >> PAGE_SHIFT)); 113 vma->size = view->partial.size; 114 vma->size <<= PAGE_SHIFT; 115 GEM_BUG_ON(vma->size >= obj->base.size); 116 } else if (view->type == I915_GGTT_VIEW_ROTATED) { 117 vma->size = intel_rotation_info_size(&view->rotated); 118 vma->size <<= PAGE_SHIFT; 119 } 120 } 121 122 if (unlikely(vma->size > vm->total)) 123 goto err_vma; 124 125 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE)); 126 127 if (i915_is_ggtt(vm)) { 128 if (unlikely(overflows_type(vma->size, u32))) 129 goto err_vma; 130 131 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size, 132 i915_gem_object_get_tiling(obj), 133 i915_gem_object_get_stride(obj)); 134 if (unlikely(vma->fence_size < vma->size || /* overflow */ 135 vma->fence_size > vm->total)) 136 goto err_vma; 137 138 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT)); 139 140 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size, 141 i915_gem_object_get_tiling(obj), 142 i915_gem_object_get_stride(obj)); 143 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment)); 144 145 vma->flags |= I915_VMA_GGTT; 146 list_add(&vma->obj_link, &obj->vma_list); 147 } else { 148 i915_ppgtt_get(i915_vm_to_ppgtt(vm)); 149 list_add_tail(&vma->obj_link, &obj->vma_list); 150 } 151 152 rb = NULL; 153 p = &obj->vma_tree.rb_node; 154 while (*p) { 155 struct i915_vma *pos; 156 157 rb = *p; 158 pos = rb_entry(rb, struct i915_vma, obj_node); 159 if (i915_vma_compare(pos, vm, view) < 0) 160 p = &rb->rb_right; 161 else 162 p = &rb->rb_left; 163 } 164 rb_link_node(&vma->obj_node, rb, p); 165 rb_insert_color(&vma->obj_node, &obj->vma_tree); 166 list_add(&vma->vm_link, &vm->unbound_list); 167 168 return vma; 169 170 err_vma: 171 kmem_cache_free(vm->i915->vmas, vma); 172 return ERR_PTR(-E2BIG); 173 } 174 175 static struct i915_vma * 176 vma_lookup(struct drm_i915_gem_object *obj, 177 struct i915_address_space *vm, 178 const struct i915_ggtt_view *view) 179 { 180 struct rb_node *rb; 181 182 rb = obj->vma_tree.rb_node; 183 while (rb) { 184 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node); 185 long cmp; 186 187 cmp = i915_vma_compare(vma, vm, view); 188 if (cmp == 0) 189 return vma; 190 191 if (cmp < 0) 192 rb = rb->rb_right; 193 else 194 rb = rb->rb_left; 195 } 196 197 return NULL; 198 } 199 200 /** 201 * i915_vma_instance - return the singleton instance of the VMA 202 * @obj: parent &struct drm_i915_gem_object to be mapped 203 * @vm: address space in which the mapping is located 204 * @view: additional mapping requirements 205 * 206 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with 207 * the same @view characteristics. If a match is not found, one is created. 208 * Once created, the VMA is kept until either the object is freed, or the 209 * address space is closed. 210 * 211 * Must be called with struct_mutex held. 212 * 213 * Returns the vma, or an error pointer. 214 */ 215 struct i915_vma * 216 i915_vma_instance(struct drm_i915_gem_object *obj, 217 struct i915_address_space *vm, 218 const struct i915_ggtt_view *view) 219 { 220 struct i915_vma *vma; 221 222 lockdep_assert_held(&obj->base.dev->struct_mutex); 223 GEM_BUG_ON(view && !i915_is_ggtt(vm)); 224 GEM_BUG_ON(vm->closed); 225 226 vma = vma_lookup(obj, vm, view); 227 if (!vma) 228 vma = vma_create(obj, vm, view); 229 230 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma)); 231 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view)); 232 GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma); 233 return vma; 234 } 235 236 /** 237 * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space. 238 * @vma: VMA to map 239 * @cache_level: mapping cache level 240 * @flags: flags like global or local mapping 241 * 242 * DMA addresses are taken from the scatter-gather table of this object (or of 243 * this VMA in case of non-default GGTT views) and PTE entries set up. 244 * Note that DMA addresses are also the only part of the SG table we care about. 245 */ 246 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 247 u32 flags) 248 { 249 u32 bind_flags; 250 u32 vma_flags; 251 int ret; 252 253 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 254 GEM_BUG_ON(vma->size > vma->node.size); 255 256 if (GEM_WARN_ON(range_overflows(vma->node.start, 257 vma->node.size, 258 vma->vm->total))) 259 return -ENODEV; 260 261 if (GEM_WARN_ON(!flags)) 262 return -EINVAL; 263 264 bind_flags = 0; 265 if (flags & PIN_GLOBAL) 266 bind_flags |= I915_VMA_GLOBAL_BIND; 267 if (flags & PIN_USER) 268 bind_flags |= I915_VMA_LOCAL_BIND; 269 270 vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); 271 if (flags & PIN_UPDATE) 272 bind_flags |= vma_flags; 273 else 274 bind_flags &= ~vma_flags; 275 if (bind_flags == 0) 276 return 0; 277 278 GEM_BUG_ON(!vma->pages); 279 280 trace_i915_vma_bind(vma, bind_flags); 281 ret = vma->vm->bind_vma(vma, cache_level, bind_flags); 282 if (ret) 283 return ret; 284 285 vma->flags |= bind_flags; 286 return 0; 287 } 288 289 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) 290 { 291 void __iomem *ptr; 292 int err; 293 294 /* Access through the GTT requires the device to be awake. */ 295 assert_rpm_wakelock_held(vma->vm->i915); 296 297 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 298 if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { 299 err = -ENODEV; 300 goto err; 301 } 302 303 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); 304 GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0); 305 306 ptr = vma->iomap; 307 if (ptr == NULL) { 308 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable, 309 vma->node.start, 310 vma->node.size); 311 if (ptr == NULL) { 312 err = -ENOMEM; 313 goto err; 314 } 315 316 vma->iomap = ptr; 317 } 318 319 __i915_vma_pin(vma); 320 321 err = i915_vma_pin_fence(vma); 322 if (err) 323 goto err_unpin; 324 325 return ptr; 326 327 err_unpin: 328 __i915_vma_unpin(vma); 329 err: 330 return IO_ERR_PTR(err); 331 } 332 333 void i915_vma_unpin_iomap(struct i915_vma *vma) 334 { 335 lockdep_assert_held(&vma->obj->base.dev->struct_mutex); 336 337 GEM_BUG_ON(vma->iomap == NULL); 338 339 i915_vma_unpin_fence(vma); 340 i915_vma_unpin(vma); 341 } 342 343 void i915_vma_unpin_and_release(struct i915_vma **p_vma) 344 { 345 struct i915_vma *vma; 346 struct drm_i915_gem_object *obj; 347 348 vma = fetch_and_zero(p_vma); 349 if (!vma) 350 return; 351 352 obj = vma->obj; 353 354 i915_vma_unpin(vma); 355 i915_vma_close(vma); 356 357 __i915_gem_object_release_unless_active(obj); 358 } 359 360 bool i915_vma_misplaced(const struct i915_vma *vma, 361 u64 size, u64 alignment, u64 flags) 362 { 363 if (!drm_mm_node_allocated(&vma->node)) 364 return false; 365 366 if (vma->node.size < size) 367 return true; 368 369 GEM_BUG_ON(alignment && !is_power_of_2(alignment)); 370 if (alignment && !IS_ALIGNED(vma->node.start, alignment)) 371 return true; 372 373 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma)) 374 return true; 375 376 if (flags & PIN_OFFSET_BIAS && 377 vma->node.start < (flags & PIN_OFFSET_MASK)) 378 return true; 379 380 if (flags & PIN_OFFSET_FIXED && 381 vma->node.start != (flags & PIN_OFFSET_MASK)) 382 return true; 383 384 return false; 385 } 386 387 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) 388 { 389 bool mappable, fenceable; 390 391 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); 392 GEM_BUG_ON(!vma->fence_size); 393 394 /* 395 * Explicitly disable for rotated VMA since the display does not 396 * need the fence and the VMA is not accessible to other users. 397 */ 398 if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED) 399 return; 400 401 fenceable = (vma->node.size >= vma->fence_size && 402 IS_ALIGNED(vma->node.start, vma->fence_alignment)); 403 404 mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end; 405 406 if (mappable && fenceable) 407 vma->flags |= I915_VMA_CAN_FENCE; 408 else 409 vma->flags &= ~I915_VMA_CAN_FENCE; 410 } 411 412 static bool color_differs(struct drm_mm_node *node, unsigned long color) 413 { 414 return node->allocated && node->color != color; 415 } 416 417 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level) 418 { 419 struct drm_mm_node *node = &vma->node; 420 struct drm_mm_node *other; 421 422 /* 423 * On some machines we have to be careful when putting differing types 424 * of snoopable memory together to avoid the prefetcher crossing memory 425 * domains and dying. During vm initialisation, we decide whether or not 426 * these constraints apply and set the drm_mm.color_adjust 427 * appropriately. 428 */ 429 if (vma->vm->mm.color_adjust == NULL) 430 return true; 431 432 /* Only valid to be called on an already inserted vma */ 433 GEM_BUG_ON(!drm_mm_node_allocated(node)); 434 GEM_BUG_ON(list_empty(&node->node_list)); 435 436 other = list_prev_entry(node, node_list); 437 if (color_differs(other, cache_level) && !drm_mm_hole_follows(other)) 438 return false; 439 440 other = list_next_entry(node, node_list); 441 if (color_differs(other, cache_level) && !drm_mm_hole_follows(node)) 442 return false; 443 444 return true; 445 } 446 447 /** 448 * i915_vma_insert - finds a slot for the vma in its address space 449 * @vma: the vma 450 * @size: requested size in bytes (can be larger than the VMA) 451 * @alignment: required alignment 452 * @flags: mask of PIN_* flags to use 453 * 454 * First we try to allocate some free space that meets the requirements for 455 * the VMA. Failiing that, if the flags permit, it will evict an old VMA, 456 * preferrably the oldest idle entry to make room for the new VMA. 457 * 458 * Returns: 459 * 0 on success, negative error code otherwise. 460 */ 461 static int 462 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) 463 { 464 struct drm_i915_private *dev_priv = vma->vm->i915; 465 struct drm_i915_gem_object *obj = vma->obj; 466 u64 start, end; 467 int ret; 468 469 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); 470 GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); 471 472 size = max(size, vma->size); 473 alignment = max(alignment, vma->display_alignment); 474 if (flags & PIN_MAPPABLE) { 475 size = max_t(typeof(size), size, vma->fence_size); 476 alignment = max_t(typeof(alignment), 477 alignment, vma->fence_alignment); 478 } 479 480 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); 481 GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT)); 482 GEM_BUG_ON(!is_power_of_2(alignment)); 483 484 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; 485 GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); 486 487 end = vma->vm->total; 488 if (flags & PIN_MAPPABLE) 489 end = min_t(u64, end, dev_priv->ggtt.mappable_end); 490 if (flags & PIN_ZONE_4G) 491 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE); 492 GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE)); 493 494 /* If binding the object/GGTT view requires more space than the entire 495 * aperture has, reject it early before evicting everything in a vain 496 * attempt to find space. 497 */ 498 if (size > end) { 499 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n", 500 size, obj->base.size, 501 flags & PIN_MAPPABLE ? "mappable" : "total", 502 end); 503 return -ENOSPC; 504 } 505 506 ret = i915_gem_object_pin_pages(obj); 507 if (ret) 508 return ret; 509 510 GEM_BUG_ON(vma->pages); 511 512 ret = vma->vm->set_pages(vma); 513 if (ret) 514 goto err_unpin; 515 516 if (flags & PIN_OFFSET_FIXED) { 517 u64 offset = flags & PIN_OFFSET_MASK; 518 if (!IS_ALIGNED(offset, alignment) || 519 range_overflows(offset, size, end)) { 520 ret = -EINVAL; 521 goto err_clear; 522 } 523 524 ret = i915_gem_gtt_reserve(vma->vm, &vma->node, 525 size, offset, obj->cache_level, 526 flags); 527 if (ret) 528 goto err_clear; 529 } else { 530 /* 531 * We only support huge gtt pages through the 48b PPGTT, 532 * however we also don't want to force any alignment for 533 * objects which need to be tightly packed into the low 32bits. 534 * 535 * Note that we assume that GGTT are limited to 4GiB for the 536 * forseeable future. See also i915_ggtt_offset(). 537 */ 538 if (upper_32_bits(end - 1) && 539 vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { 540 /* 541 * We can't mix 64K and 4K PTEs in the same page-table 542 * (2M block), and so to avoid the ugliness and 543 * complexity of coloring we opt for just aligning 64K 544 * objects to 2M. 545 */ 546 u64 page_alignment = 547 rounddown_pow_of_two(vma->page_sizes.sg | 548 I915_GTT_PAGE_SIZE_2M); 549 550 /* 551 * Check we don't expand for the limited Global GTT 552 * (mappable aperture is even more precious!). This 553 * also checks that we exclude the aliasing-ppgtt. 554 */ 555 GEM_BUG_ON(i915_vma_is_ggtt(vma)); 556 557 alignment = max(alignment, page_alignment); 558 559 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) 560 size = round_up(size, I915_GTT_PAGE_SIZE_2M); 561 } 562 563 ret = i915_gem_gtt_insert(vma->vm, &vma->node, 564 size, alignment, obj->cache_level, 565 start, end, flags); 566 if (ret) 567 goto err_clear; 568 569 GEM_BUG_ON(vma->node.start < start); 570 GEM_BUG_ON(vma->node.start + vma->node.size > end); 571 } 572 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 573 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level)); 574 575 list_move_tail(&vma->vm_link, &vma->vm->inactive_list); 576 577 spin_lock(&dev_priv->mm.obj_lock); 578 list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list); 579 obj->bind_count++; 580 spin_unlock(&dev_priv->mm.obj_lock); 581 582 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); 583 584 return 0; 585 586 err_clear: 587 vma->vm->clear_pages(vma); 588 err_unpin: 589 i915_gem_object_unpin_pages(obj); 590 return ret; 591 } 592 593 static void 594 i915_vma_remove(struct i915_vma *vma) 595 { 596 struct drm_i915_private *i915 = vma->vm->i915; 597 struct drm_i915_gem_object *obj = vma->obj; 598 599 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 600 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); 601 602 vma->vm->clear_pages(vma); 603 604 drm_mm_remove_node(&vma->node); 605 list_move_tail(&vma->vm_link, &vma->vm->unbound_list); 606 607 /* Since the unbound list is global, only move to that list if 608 * no more VMAs exist. 609 */ 610 spin_lock(&i915->mm.obj_lock); 611 if (--obj->bind_count == 0) 612 list_move_tail(&obj->mm.link, &i915->mm.unbound_list); 613 spin_unlock(&i915->mm.obj_lock); 614 615 /* And finally now the object is completely decoupled from this vma, 616 * we can drop its hold on the backing storage and allow it to be 617 * reaped by the shrinker. 618 */ 619 i915_gem_object_unpin_pages(obj); 620 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); 621 } 622 623 int __i915_vma_do_pin(struct i915_vma *vma, 624 u64 size, u64 alignment, u64 flags) 625 { 626 const unsigned int bound = vma->flags; 627 int ret; 628 629 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 630 GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0); 631 GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma)); 632 633 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { 634 ret = -EBUSY; 635 goto err_unpin; 636 } 637 638 if ((bound & I915_VMA_BIND_MASK) == 0) { 639 ret = i915_vma_insert(vma, size, alignment, flags); 640 if (ret) 641 goto err_unpin; 642 } 643 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 644 645 ret = i915_vma_bind(vma, vma->obj->cache_level, flags); 646 if (ret) 647 goto err_remove; 648 649 GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0); 650 651 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) 652 __i915_vma_set_map_and_fenceable(vma); 653 654 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); 655 return 0; 656 657 err_remove: 658 if ((bound & I915_VMA_BIND_MASK) == 0) { 659 i915_vma_remove(vma); 660 GEM_BUG_ON(vma->pages); 661 GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK); 662 } 663 err_unpin: 664 __i915_vma_unpin(vma); 665 return ret; 666 } 667 668 static void i915_vma_destroy(struct i915_vma *vma) 669 { 670 int i; 671 672 GEM_BUG_ON(vma->node.allocated); 673 GEM_BUG_ON(i915_vma_is_active(vma)); 674 GEM_BUG_ON(!i915_vma_is_closed(vma)); 675 GEM_BUG_ON(vma->fence); 676 677 for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) 678 GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i])); 679 GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence)); 680 681 list_del(&vma->vm_link); 682 if (!i915_vma_is_ggtt(vma)) 683 i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); 684 685 kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma); 686 } 687 688 void i915_vma_close(struct i915_vma *vma) 689 { 690 GEM_BUG_ON(i915_vma_is_closed(vma)); 691 vma->flags |= I915_VMA_CLOSED; 692 693 list_del(&vma->obj_link); 694 rb_erase(&vma->obj_node, &vma->obj->vma_tree); 695 696 if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma)) 697 WARN_ON(i915_vma_unbind(vma)); 698 } 699 700 static void __i915_vma_iounmap(struct i915_vma *vma) 701 { 702 GEM_BUG_ON(i915_vma_is_pinned(vma)); 703 704 if (vma->iomap == NULL) 705 return; 706 707 io_mapping_unmap(vma->iomap); 708 vma->iomap = NULL; 709 } 710 711 void i915_vma_revoke_mmap(struct i915_vma *vma) 712 { 713 struct drm_vma_offset_node *node = &vma->obj->base.vma_node; 714 u64 vma_offset; 715 716 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 717 718 if (!i915_vma_has_userfault(vma)) 719 return; 720 721 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); 722 GEM_BUG_ON(!vma->obj->userfault_count); 723 724 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; 725 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping, 726 drm_vma_node_offset_addr(node) + vma_offset, 727 vma->size, 728 1); 729 730 i915_vma_unset_userfault(vma); 731 if (!--vma->obj->userfault_count) 732 list_del(&vma->obj->userfault_link); 733 } 734 735 int i915_vma_unbind(struct i915_vma *vma) 736 { 737 struct drm_i915_gem_object *obj = vma->obj; 738 unsigned long active; 739 int ret; 740 741 lockdep_assert_held(&obj->base.dev->struct_mutex); 742 743 /* First wait upon any activity as retiring the request may 744 * have side-effects such as unpinning or even unbinding this vma. 745 */ 746 might_sleep(); 747 active = i915_vma_get_active(vma); 748 if (active) { 749 int idx; 750 751 /* When a closed VMA is retired, it is unbound - eek. 752 * In order to prevent it from being recursively closed, 753 * take a pin on the vma so that the second unbind is 754 * aborted. 755 * 756 * Even more scary is that the retire callback may free 757 * the object (last active vma). To prevent the explosion 758 * we defer the actual object free to a worker that can 759 * only proceed once it acquires the struct_mutex (which 760 * we currently hold, therefore it cannot free this object 761 * before we are finished). 762 */ 763 __i915_vma_pin(vma); 764 765 for_each_active(active, idx) { 766 ret = i915_gem_active_retire(&vma->last_read[idx], 767 &vma->vm->i915->drm.struct_mutex); 768 if (ret) 769 break; 770 } 771 772 if (!ret) { 773 ret = i915_gem_active_retire(&vma->last_fence, 774 &vma->vm->i915->drm.struct_mutex); 775 } 776 777 __i915_vma_unpin(vma); 778 if (ret) 779 return ret; 780 } 781 GEM_BUG_ON(i915_vma_is_active(vma)); 782 783 if (i915_vma_is_pinned(vma)) 784 return -EBUSY; 785 786 if (!drm_mm_node_allocated(&vma->node)) 787 goto destroy; 788 789 GEM_BUG_ON(obj->bind_count == 0); 790 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 791 792 if (i915_vma_is_map_and_fenceable(vma)) { 793 /* release the fence reg _after_ flushing */ 794 ret = i915_vma_put_fence(vma); 795 if (ret) 796 return ret; 797 798 /* Force a pagefault for domain tracking on next user access */ 799 i915_vma_revoke_mmap(vma); 800 801 __i915_vma_iounmap(vma); 802 vma->flags &= ~I915_VMA_CAN_FENCE; 803 } 804 GEM_BUG_ON(vma->fence); 805 GEM_BUG_ON(i915_vma_has_userfault(vma)); 806 807 if (likely(!vma->vm->closed)) { 808 trace_i915_vma_unbind(vma); 809 vma->vm->unbind_vma(vma); 810 } 811 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); 812 813 i915_vma_remove(vma); 814 815 destroy: 816 if (unlikely(i915_vma_is_closed(vma))) 817 i915_vma_destroy(vma); 818 819 return 0; 820 } 821 822 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 823 #include "selftests/i915_vma.c" 824 #endif 825