/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/highmem.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "display/intel_frontbuffer.h"
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_dmabuf.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_gem_ttm.h"
#include "i915_memcpy.h"
#include "i915_trace.h"

static struct kmem_cache *slab_objects;

static const struct drm_gem_object_funcs i915_gem_object_funcs;

struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
	struct drm_i915_gem_object *obj;

	obj = kmem_cache_zalloc(slab_objects, GFP_KERNEL);
	if (!obj)
		return NULL;
	obj->base.funcs = &i915_gem_object_funcs;

	return obj;
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	return kmem_cache_free(slab_objects, obj);
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key, unsigned flags)
{
	/*
	 * A gem object is embedded both in a struct ttm_buffer_object :/ and
	 * in a drm_i915_gem_object. Make sure they are aliased.
	 */
	BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
		     offsetof(typeof(*obj), __do_not_access.base));

	spin_lock_init(&obj->vma.lock);
	INIT_LIST_HEAD(&obj->vma.list);

	INIT_LIST_HEAD(&obj->mm.link);

	INIT_LIST_HEAD(&obj->lut_list);
	spin_lock_init(&obj->lut_lock);

	spin_lock_init(&obj->mmo.lock);
	obj->mmo.offsets = RB_ROOT;

	init_rcu_head(&obj->rcu);

	obj->ops = ops;
	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
	obj->flags = flags;

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);
	INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_dma_page.lock);
}

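/*
 * Illustrative sketch (not part of the driver): the usual allocation pattern
 * a backend follows when creating an object. "my_backend_ops" is a
 * hypothetical ops table and error handling is elided.
 *
 *	static struct lock_class_key lock_class;
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_alloc();
 *	if (!obj)
 *		return ERR_PTR(-ENOMEM);
 *
 *	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 *	i915_gem_object_init(obj, &my_backend_ops, &lock_class,
 *			     I915_BO_ALLOC_USER);
 */
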
/**
 * __i915_gem_object_fini - Clean up a GEM object initialization
 * @obj: The gem object to cleanup
 *
 * This function cleans up gem object fields that are set up by
 * drm_gem_private_object_init() and i915_gem_object_init().
 * It's primarily intended as a helper for backends that need to
 * clean up the gem object in separate steps.
 */
void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
{
	mutex_destroy(&obj->mm.get_page.lock);
	mutex_destroy(&obj->mm.get_dma_page.lock);
	dma_resv_fini(&obj->base._resv);
}

/**
 * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
 * for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	obj->cache_level = cache_level;

	if (cache_level != I915_CACHE_NONE)
		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
				       I915_BO_CACHE_COHERENT_FOR_WRITE);
	else if (HAS_LLC(i915))
		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	obj->cache_dirty =
		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
		!IS_DGFX(i915);
}

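/*
 * Illustrative sketch (not part of the driver): how a backend typically
 * picks a default cache level when setting up a new object, similar to what
 * the shmem backend does. Assumes a valid obj and i915 in scope.
 *
 *	i915_gem_object_set_cache_coherency(obj,
 *					    HAS_LLC(i915) ?
 *					    I915_CACHE_LLC : I915_CACHE_NONE);
 */
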
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/*
	 * This is purely from a security perspective, so we simply don't care
	 * about non-userspace objects being able to bypass the LLC.
	 */
	if (!(obj->flags & I915_BO_ALLOC_USER))
		return false;

	/*
	 * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
	 * possible for userspace to bypass the GTT caching bits set by the
	 * kernel, as per the given object cache_level. This is troublesome
	 * since the heavy flush we apply when first gathering the pages is
	 * skipped if the kernel thinks the object is coherent with the GPU. As
	 * a result it might be possible to bypass the cache and read the
	 * contents of the page directly, which could be stale data. If it's
	 * just a case of userspace shooting themselves in the foot then so be
	 * it, but since i915 takes the stance of always zeroing memory before
	 * handing it to userspace, we need to prevent this.
	 */
	return IS_JSL_EHL(i915);
}

static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle bookmark = {};
	struct i915_mmap_offset *mmo, *mn;
	struct i915_lut_handle *lut, *ln;
	LIST_HEAD(close);

	spin_lock(&obj->lut_lock);
	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;

		if (ctx && ctx->file_priv == fpriv) {
			i915_gem_context_get(ctx);
			list_move(&lut->obj_link, &close);
		}

		/* Break long locks, and carefully continue on from this spot */
		if (&ln->obj_link != &obj->lut_list) {
			list_add_tail(&bookmark.obj_link, &ln->obj_link);
			if (cond_resched_lock(&obj->lut_lock))
				list_safe_reset_next(&bookmark, ln, obj_link);
			__list_del_entry(&bookmark.obj_link);
		}
	}
	spin_unlock(&obj->lut_lock);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
		drm_vma_node_revoke(&mmo->vma_node, file);
	spin_unlock(&obj->mmo.lock);

	list_for_each_entry_safe(lut, ln, &close, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		/*
		 * We allow the process to have multiple handles to the same
		 * vma, in the same fd namespace, by virtue of flink/open.
		 */

		mutex_lock(&ctx->lut_mutex);
		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		if (vma) {
			GEM_BUG_ON(vma->obj != obj);
			GEM_BUG_ON(!atomic_read(&vma->open_count));
			i915_vma_close(vma);
		}
		mutex_unlock(&ctx->lut_mutex);

		i915_gem_context_put(lut->ctx);
		i915_lut_handle_free(lut);
		i915_gem_object_put(obj);
	}
}

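/*
 * Illustrative sketch (not part of the driver): the "bookmark" idiom used
 * above to walk a long, spinlock-protected list without hogging the lock.
 * A dummy entry marks the current position so the lock can be dropped via
 * cond_resched_lock() and the walk resumed safely afterwards. "struct entry"
 * and the names below are hypothetical.
 *
 *	struct entry bookmark = {};
 *
 *	spin_lock(&lock);
 *	list_for_each_entry_safe(pos, next, &head, link) {
 *		// ... process pos ...
 *		if (&next->link != &head) {
 *			list_add_tail(&bookmark.link, &next->link);
 *			if (cond_resched_lock(&lock))
 *				list_safe_reset_next(&bookmark, next, link);
 *			__list_del_entry(&bookmark.link);
 *		}
 *	}
 *	spin_unlock(&lock);
 */
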
void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	i915_gem_object_free(obj);

	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
	atomic_dec(&i915->mm.free_count);
}

static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
	/* Skip serialisation and waking the device if known to be unused. */

	if (obj->userfault_count)
		i915_gem_object_release_mmap_gtt(obj);

	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
		struct i915_mmap_offset *mmo, *mn;

		i915_gem_object_release_mmap_offset(obj);

		rbtree_postorder_for_each_entry_safe(mmo, mn,
						     &obj->mmo.offsets,
						     offset) {
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
		}
		obj->mmo.offsets = RB_ROOT;
	}
}

/**
 * __i915_gem_object_pages_fini - Clean up pages use of a gem object
 * @obj: The gem object to clean up
 *
 * This function cleans up usage of the object mm.pages member. It
 * is intended for backends that need to clean up a gem object in
 * separate steps and needs to be called when the object is idle before
 * the object's backing memory is freed.
 */
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
	assert_object_held_shared(obj);

	if (!list_empty(&obj->vma.list)) {
		struct i915_vma *vma;

		spin_lock(&obj->vma.lock);
		while ((vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
			GEM_BUG_ON(vma->obj != obj);
			spin_unlock(&obj->vma.lock);

			i915_vma_destroy(vma);

			spin_lock(&obj->vma.lock);
		}
		spin_unlock(&obj->vma.lock);
	}

	__i915_gem_object_free_mmaps(obj);

	atomic_set(&obj->mm.pages_pin_count, 0);

	/*
	 * dma_buf_unmap_attachment() requires reservation to be
	 * locked. The imported GEM shouldn't share reservation lock
	 * and ttm_bo_cleanup_memtype_use() shouldn't be invoked for
	 * dma-buf, so it's safe to take the lock.
	 */
	if (obj->base.import_attach)
		i915_gem_object_lock(obj, NULL);

	__i915_gem_object_put_pages(obj);

	if (obj->base.import_attach)
		i915_gem_object_unlock(obj);

	GEM_BUG_ON(i915_gem_object_has_pages(obj));
}

void __i915_gem_free_object(struct drm_i915_gem_object *obj)
{
	trace_i915_gem_object_destroy(obj);

	GEM_BUG_ON(!list_empty(&obj->lut_list));

	bitmap_free(obj->bit_17);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	drm_gem_free_mmap_offset(&obj->base);

	if (obj->ops->release)
		obj->ops->release(obj);

	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);

	if (obj->shares_resv_from)
		i915_vm_resv_put(obj->shares_resv_from);

	__i915_gem_object_fini(obj);
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	llist_for_each_entry_safe(obj, on, freed, freed) {
		might_sleep();
		if (obj->ops->delayed_free) {
			obj->ops->delayed_free(obj);
			continue;
		}

		__i915_gem_object_pages_fini(obj);
		__i915_gem_free_object(obj);

		/* But keep the pointer alive for RCU-protected lookups */
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
		cond_resched();
	}
}

void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->mm.free_list);

	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);

	i915_gem_flush_free_objects(i915);
}

static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));

	/*
	 * Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	atomic_inc(&i915->mm.free_count);

	/*
	 * Since we require blocking on struct_mutex to unbind the freed
	 * object from the GPU before releasing resources back to the
	 * system, we cannot do that directly from the RCU callback (which may
	 * be a softirq context), but must instead defer that work onto a
	 * kthread. We use the RCU callback rather than move the freed object
	 * directly onto the work queue so that we can mix between using the
	 * worker and performing frees directly from subsequent allocations for
	 * crude but effective memory throttling.
	 */

	if (llist_add(&obj->freed, &i915->mm.free_list))
		queue_work(i915->wq, &i915->mm.free_work);
}

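/*
 * Illustrative sketch (not part of the driver): the "crude but effective
 * memory throttling" mentioned above. An allocation path may reap any
 * pending frees itself before asking for more memory, e.g.:
 *
 *	i915_gem_flush_free_objects(i915);
 *	obj = i915_gem_object_create_shmem(i915, size);
 */
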
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_flush(front, origin);
		intel_frontbuffer_put(front);
	}
}

void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_invalidate(front, origin);
		intel_frontbuffer_put(front);
	}
}

static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	void *src_map;
	void *src_ptr;

	src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));

	src_ptr = src_map + offset_in_page(offset);
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_virt_range(src_ptr, size);
	memcpy(dst, src_ptr, size);

	kunmap_atomic(src_map);
}

static void
i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	void __iomem *src_map;
	void __iomem *src_ptr;
	dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);

	src_map = io_mapping_map_wc(&obj->mm.region->iomap,
				    dma - obj->mm.region->region.start,
				    PAGE_SIZE);

	src_ptr = src_map + offset_in_page(offset);
	if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
		memcpy_fromio(dst, src_ptr, size);

	io_mapping_unmap(src_map);
}

/**
 * i915_gem_object_read_from_page - read data from the page of a GEM object
 * @obj: GEM object to read from
 * @offset: offset within the object
 * @dst: buffer to store the read data
 * @size: size to read
 *
 * Reads data from @obj at the specified offset. The requested region to read
 * from can't cross a page boundary. The caller must ensure that @obj pages
 * are pinned and that @obj is synced wrt. any related writes.
 *
 * Return: %0 on success or -ENODEV if the type of @obj's backing store is
 * unsupported.
 */
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	GEM_BUG_ON(offset >= obj->base.size);
	GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
	else if (i915_gem_object_has_iomem(obj))
		i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
	else
		return -ENODEV;

	return 0;
}

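/*
 * Illustrative sketch (not part of the driver): reading a single dword from
 * the start of an object whose pages are already pinned and which is idle
 * wrt. writes. Further error handling is elided.
 *
 *	u32 value;
 *	int err;
 *
 *	err = i915_gem_object_read_from_page(obj, 0, &value, sizeof(value));
 *	if (err)
 *		return err;
 */
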
/**
 * i915_gem_object_evictable - Whether object is likely evictable after unbind.
 * @obj: The object to check
 *
 * This function checks whether the object is likely evictable after unbind.
 * If the object is not locked when checking, the result is only advisory.
 * If the object is locked when checking, and the function returns true,
 * then an eviction should indeed be possible. But since unlocked vma
 * unpinning and unbinding is currently possible, the object can actually
 * become evictable even if this function returns false.
 *
 * Return: true if the object may be evictable. False otherwise.
 */
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = atomic_read(&obj->mm.pages_pin_count);

	if (!pin_count)
		return true;

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma)) {
			spin_unlock(&obj->vma.lock);
			return false;
		}
		if (atomic_read(&vma->pages_count))
			pin_count--;
	}
	spin_unlock(&obj->vma.lock);
	GEM_WARN_ON(pin_count < 0);

	return pin_count == 0;
}

/**
 * i915_gem_object_migratable - Whether the object is migratable out of the
 * current region.
 * @obj: Pointer to the object.
 *
 * Return: Whether the object is allowed to be resident in other
 * regions than the current while pages are present.
 */
bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

	if (!mr)
		return false;

	return obj->mm.n_placements > 1;
}

/**
 * i915_gem_object_has_struct_page - Whether the object is page-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the page backing may change under the caller.
 *
 * Return: True if page-backed, false otherwise.
 */
bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
}

/**
 * i915_gem_object_has_iomem - Whether the object is iomem-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the iomem backing may change under the caller.
 *
 * Return: True if iomem-backed, false otherwise.
 */
bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_IOMEM;
}

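/*
 * Illustrative sketch (not part of the driver): on discrete parts the
 * backing store may move unless the object is locked or its pages are
 * pinned, so query the backing type only while the object is held.
 *
 *	i915_gem_object_lock(obj, NULL);
 *	if (i915_gem_object_has_struct_page(obj))
 *		; // use kmap-based CPU access
 *	else if (i915_gem_object_has_iomem(obj))
 *		; // use io_mapping-based access
 *	i915_gem_object_unlock(obj);
 */
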
/**
 * i915_gem_object_can_migrate - Whether an object likely can be migrated
 *
 * @obj: The object to migrate
 * @id: The region intended to migrate to
 *
 * Check whether the object backend supports migration to the
 * given region. Note that pinning may affect the ability to migrate as
 * returned by this function.
 *
 * This function is primarily intended as a helper for checking the
 * possibility to migrate objects and might be slightly less permissive
 * than i915_gem_object_migrate() when it comes to objects with the
 * I915_BO_ALLOC_USER flag set.
 *
 * Return: true if migration is possible, false otherwise.
 */
bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int num_allowed = obj->mm.n_placements;
	struct intel_memory_region *mr;
	unsigned int i;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);

	mr = i915->mm.regions[id];
	if (!mr)
		return false;

	if (!IS_ALIGNED(obj->base.size, mr->min_page_size))
		return false;

	if (obj->mm.region == mr)
		return true;

	if (!i915_gem_object_evictable(obj))
		return false;

	if (!obj->ops->migrate)
		return false;

	if (!(obj->flags & I915_BO_ALLOC_USER))
		return true;

	if (num_allowed == 0)
		return false;

	for (i = 0; i < num_allowed; ++i) {
		if (mr == obj->mm.placements[i])
			return true;
	}

	return false;
}

/**
 * i915_gem_object_migrate - Migrate an object to the desired region id
 * @obj: The object to migrate.
 * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
 * not be successful in evicting other objects to make room for this object.
 * @id: The region id to migrate to.
 *
 * Attempt to migrate the object to the desired memory region. The
 * object backend must support migration and the object may not be
 * pinned (explicitly pinned pages or pinned vmas). The object must
 * be locked.
 * On successful completion, the object will have pages pointing to
 * memory in the new region, but an async migration task may not have
 * completed yet, and to accomplish that, i915_gem_object_wait_migration()
 * must be called.
 *
 * Note: the @ww parameter is not used yet, but included to make sure
 * callers put some effort into obtaining a valid ww ctx if one is
 * available.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
 * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
 * -EBUSY if the object is pinned.
 */
int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mr;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
	assert_object_held(obj);

	mr = i915->mm.regions[id];
	GEM_BUG_ON(!mr);

	if (!i915_gem_object_can_migrate(obj, id))
		return -EINVAL;

	if (!obj->ops->migrate) {
		if (GEM_WARN_ON(obj->mm.region != mr))
			return -EINVAL;
		return 0;
	}

	return obj->ops->migrate(obj, mr);
}

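/*
 * Illustrative sketch (not part of the driver): migrating an object to
 * system memory under a ww context and waiting for the async copy, in the
 * spirit of the kernel-doc above. Error handling beyond the ww loop is
 * elided.
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_gem_object_lock(obj, &ww);
 *		if (err)
 *			continue;
 *
 *		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
 *		if (err)
 *			continue;
 *
 *		err = i915_gem_object_wait_migration(obj, 0);
 *	}
 */
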
/**
 * i915_gem_object_placement_possible - Check whether the object can be
 * placed at certain memory type
 * @obj: Pointer to the object
 * @type: The memory type to check
 *
 * Return: True if the object can be placed in @type. False otherwise.
 */
bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type)
{
	unsigned int i;

	if (!obj->mm.n_placements) {
		switch (type) {
		case INTEL_MEMORY_LOCAL:
			return i915_gem_object_has_iomem(obj);
		case INTEL_MEMORY_SYSTEM:
			return i915_gem_object_has_pages(obj);
		default:
			/* Ignore stolen for now */
			GEM_BUG_ON(1);
			return false;
		}
	}

	for (i = 0; i < obj->mm.n_placements; i++) {
		if (obj->mm.placements[i]->type == type)
			return true;
	}

	return false;
}

/**
 * i915_gem_object_needs_ccs_pages - Check whether the object requires extra
 * pages when placed in system-memory, in order to save and later restore the
 * flat-CCS aux state when the object is moved between local-memory and
 * system-memory
 * @obj: Pointer to the object
 *
 * Return: True if the object needs extra ccs pages. False otherwise.
 */
bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
{
	bool lmem_placement = false;
	int i;

	if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
		return false;

	for (i = 0; i < obj->mm.n_placements; i++) {
		/* Compression is not allowed for the objects with smem placement */
		if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
			return false;
		if (!lmem_placement &&
		    obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
			lmem_placement = true;
	}

	return lmem_placement;
}

void i915_gem_init__objects(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

void i915_objects_module_exit(void)
{
	kmem_cache_destroy(slab_objects);
}

int __init i915_objects_module_init(void)
{
	slab_objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!slab_objects)
		return -ENOMEM;

	return 0;
}

static const struct drm_gem_object_funcs i915_gem_object_funcs = {
	.free = i915_gem_free_object,
	.close = i915_gem_close_object,
	.export = i915_gem_prime_export,
};

/**
 * i915_gem_object_get_moving_fence - Get the object's moving fence if any
 * @obj: The object whose moving fence to get.
 * @fence: The resulting fence
 *
 * A non-signaled moving fence means that there is an async operation
 * pending on the object that needs to be waited on before setting up
 * any GPU- or CPU PTEs to the object's pages.
 *
 * Return: Negative error code or 0 for success.
 */
int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence)
{
	return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
				      fence);
}

/**
 * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
 * @obj: The object whose moving fence to wait for.
 * @intr: Whether to wait interruptible.
 *
 * If the moving fence signaled without an error, it is detached from the
 * object and put.
 *
 * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted,
 * negative error code if the async operation represented by the
 * moving fence failed.
 */
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr)
{
	long ret;

	assert_object_held(obj);

	ret = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
				    intr, MAX_SCHEDULE_TIMEOUT);
	if (!ret)
		ret = -ETIME;
	else if (ret > 0 && i915_gem_object_has_unknown_state(obj))
		ret = -EIO;

	return ret < 0 ? ret : 0;
}

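/*
 * Illustrative sketch (not part of the driver): waiting for any pending move
 * before touching the pages from the CPU, per the kernel-doc above.
 *
 *	int err;
 *
 *	i915_gem_object_lock(obj, NULL);
 *	err = i915_gem_object_wait_moving_fence(obj, true);
 *	if (!err)
 *		; // set up CPU PTEs / access the pages
 *	i915_gem_object_unlock(obj);
 */
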
/**
 * i915_gem_object_has_unknown_state - Return true if the object backing pages
 * are in an unknown_state.
 * @obj: The object to query.
 *
 * An unknown_state means that userspace must NEVER be allowed to touch the
 * pages, with either the GPU or CPU.
 *
 * ONLY valid to be called after ensuring that all kernel fences have signalled
 * (in particular the fence for moving/clearing the object).
 */
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj)
{
	/*
	 * The below barrier pairs with the dma_fence_signal() in
	 * __memcpy_work(). We should only sample the unknown_state after all
	 * the kernel fences have signalled.
	 */
	smp_rmb();
	return obj->mm.unknown_state;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_migrate.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif