/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	spin_lock(&obj->vma.lock);

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vm_put(vm);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_vma *vma;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static int __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;
	int err;

	err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
	if (err)
		atomic_or(I915_VMA_ERROR, &vma->flags);

	return err;
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned)
		__i915_gem_object_unpin_pages(vw->pinned);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}
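
/*
 * Added note: i915_vma_wait_for_bind() waits for any still-pending async
 * bind (tracked as the exclusive fence on vma->active, see i915_vma_bind()
 * below) so that callers can rely on the PTEs being present once it returns.
 */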
int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
			dma_fence_put(fence);
		}
	}

	return err;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags | I915_VMA_ALLOC;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or the object's backing store
		 * lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */

		if (vma->obj) {
			__i915_gem_object_pin_pages(vma->obj);
			work->pinned = vma->obj;
		}
	} else {
		ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
		if (ret)
			return ret;
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}
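
/*
 * Added note: recompute the map-and-fenceable status of a GGTT vma.
 * "Fenceable" means the node is large and aligned enough to back a fence
 * register; "mappable" means it lies wholly within the mappable aperture.
 * Only when both hold is I915_VMA_CAN_FENCE set (see the checks below).
 */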
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}
"mappable" : "total", 666 end); 667 return -ENOSPC; 668 } 669 670 color = 0; 671 if (vma->obj && i915_vm_has_cache_coloring(vma->vm)) 672 color = vma->obj->cache_level; 673 674 if (flags & PIN_OFFSET_FIXED) { 675 u64 offset = flags & PIN_OFFSET_MASK; 676 if (!IS_ALIGNED(offset, alignment) || 677 range_overflows(offset, size, end)) 678 return -EINVAL; 679 680 ret = i915_gem_gtt_reserve(vma->vm, &vma->node, 681 size, offset, color, 682 flags); 683 if (ret) 684 return ret; 685 } else { 686 /* 687 * We only support huge gtt pages through the 48b PPGTT, 688 * however we also don't want to force any alignment for 689 * objects which need to be tightly packed into the low 32bits. 690 * 691 * Note that we assume that GGTT are limited to 4GiB for the 692 * forseeable future. See also i915_ggtt_offset(). 693 */ 694 if (upper_32_bits(end - 1) && 695 vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { 696 /* 697 * We can't mix 64K and 4K PTEs in the same page-table 698 * (2M block), and so to avoid the ugliness and 699 * complexity of coloring we opt for just aligning 64K 700 * objects to 2M. 701 */ 702 u64 page_alignment = 703 rounddown_pow_of_two(vma->page_sizes.sg | 704 I915_GTT_PAGE_SIZE_2M); 705 706 /* 707 * Check we don't expand for the limited Global GTT 708 * (mappable aperture is even more precious!). This 709 * also checks that we exclude the aliasing-ppgtt. 710 */ 711 GEM_BUG_ON(i915_vma_is_ggtt(vma)); 712 713 alignment = max(alignment, page_alignment); 714 715 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) 716 size = round_up(size, I915_GTT_PAGE_SIZE_2M); 717 } 718 719 ret = i915_gem_gtt_insert(vma->vm, &vma->node, 720 size, alignment, color, 721 start, end, flags); 722 if (ret) 723 return ret; 724 725 GEM_BUG_ON(vma->node.start < start); 726 GEM_BUG_ON(vma->node.start + vma->node.size > end); 727 } 728 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 729 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color)); 730 731 list_add_tail(&vma->vm_link, &vma->vm->bound_list); 732 733 return 0; 734 } 735 736 static void 737 i915_vma_detach(struct i915_vma *vma) 738 { 739 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 740 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); 741 742 /* 743 * And finally now the object is completely decoupled from this 744 * vma, we can drop its hold on the backing storage and allow 745 * it to be reaped by the shrinker. 746 */ 747 list_del(&vma->vm_link); 748 } 749 750 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags) 751 { 752 unsigned int bound; 753 bool pinned = true; 754 755 bound = atomic_read(&vma->flags); 756 do { 757 if (unlikely(flags & ~bound)) 758 return false; 759 760 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) 761 return false; 762 763 if (!(bound & I915_VMA_PIN_MASK)) 764 goto unpinned; 765 766 GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0); 767 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); 768 769 return true; 770 771 unpinned: 772 /* 773 * If pin_count==0, but we are bound, check under the lock to avoid 774 * racing with a concurrent i915_vma_unbind(). 
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}

static int vma_get_pages(struct i915_vma *vma)
{
	int err = 0;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	/* Allocations ahoy! */
	if (mutex_lock_interruptible(&vma->pages_mutex))
		return -EINTR;

	if (!atomic_read(&vma->pages_count)) {
		if (vma->obj) {
			err = i915_gem_object_pin_pages(vma->obj);
			if (err)
				goto unlock;
		}

		err = vma->ops->set_pages(vma);
		if (err) {
			if (vma->obj)
				i915_gem_object_unpin_pages(vma->obj);
			goto unlock;
		}
	}
	atomic_inc(&vma->pages_count);

unlock:
	mutex_unlock(&vma->pages_mutex);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		vma->ops->clear_pages(vma);
		GEM_BUG_ON(vma->pages);
		if (vma->obj)
			i915_gem_object_unpin_pages(vma->obj);
	}
	mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}
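
/*
 * Added note: i915_vma_pin() first tries the lockless try_qad_pin() fast
 * path; failing that, it grabs the backing pages, optionally allocates an
 * async bind worker, and performs the insert/bind under vm->mutex. The
 * bits of pages_count above I915_VMA_PAGES_BIAS count bindings, the lower
 * bits count page references (see vma_unbind_pages() above).
 */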
int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(flags & PIN_UPDATE);
	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = vma_get_pages(vma);
	if (err)
		return err;

	if (flags & vma->vm->bind_async_flags) {
		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_pages;
		}
	}

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them from the mmu_notifier
	 * callbacks - but they are allowed to be part of the user ppGTT,
	 * which can never be mapped. As such we try to give the distinct
	 * users of the same mutex, distinct lockclasses [equivalent to how
	 * we keep i915_ggtt and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_fence;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj ? vma->obj->cache_level : 0,
			    flags, work);
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
err_pages:
	vma_put_pages(vma);
	return err;
}

static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	do {
		err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			i915_gem_evict_vm(vm);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}

static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	GEM_BUG_ON(i915_vma_is_closed(vma));
	list_add(&vma->closed_link, &gt->closed_vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	if (i915_vma_is_ggtt(vma))
		return;

	GEM_BUG_ON(!atomic_read(&vma->open_count));
	if (atomic_dec_and_lock_irqsave(&vma->open_count,
					&gt->closed_lock,
					flags)) {
		__vma_close(vma, gt);
		spin_unlock_irqrestore(&gt->closed_lock, flags);
	}
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
}

void i915_vma_release(struct kref *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);

	if (drm_mm_node_allocated(&vma->node)) {
		mutex_lock(&vma->vm->mutex);
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		mutex_unlock(&vma->vm->mutex);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	__i915_vma_remove_closed(vma);
	i915_vm_put(vma->vm);

	i915_active_fini(&vma->active);
	i915_vma_free(vma);
}

void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryopen(vm)) {
			i915_gem_object_put(obj);
			continue;
		}

		list_move(&vma->closed_link, &closed);
	}
	spin_unlock_irq(&gt->closed_lock);

	/* As the GT is held idle, no vma can be reopened as we destroy them */
	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		INIT_LIST_HEAD(&vma->closed_link);
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
		i915_vm_close(vm);
	}
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}
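
/*
 * Added note: revoking the mmap zaps the user's CPU PTEs for this vma's
 * GGTT mmap (via unmap_mapping_range() below), so the next access faults
 * and the fault handler can redo the domain tracking and rebinding before
 * the pages are touched again.
 */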
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	/* Wait for the vma to be bound before we start! */
	err = i915_request_await_active(rq, &vma->active,
					I915_ACTIVE_AWAIT_EXCL);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}

int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}

		dma_resv_add_excl_fence(vma->resv, &rq->fence);
		obj->write_domain = I915_GEM_DOMAIN_RENDER;
		obj->read_domains = 0;
	} else {
		err = dma_resv_reserve_shared(vma->resv, 1);
		if (unlikely(err))
			return err;

		dma_resv_add_shared_fence(vma->resv, &rq->fence);
		obj->write_domain = 0;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}
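
/*
 * Added note: __i915_vma_unbind() requires the caller to already hold
 * vm->mutex (see the lockdep assert below); i915_vma_unbind() further down
 * is the wrapper that takes the mutex (and a runtime-pm wakeref for GGTT
 * unbinds) around it.
 */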
int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	GEM_BUG_ON(i915_vma_is_pinned(vma));
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(atomic_read(&vma->vm->open))) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);
	vma_unbind_pages(vma);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);
	if (err)
		goto out_rpm;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}