/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
	bool shrinkable;
	int i;

	assert_object_held_shared(obj);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		WARN_ON_ONCE(IS_DGFX(i915));
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	shrinkable = i915_gem_object_is_shrinkable(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		GEM_BUG_ON(!list_empty(&obj->mm.link));
		atomic_inc(&obj->mm.shrink_pin);
		shrinkable = false;
	}

	if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
		struct list_head *list;
		unsigned long flags;

		assert_object_held(obj);
		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}
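
/*
 * Worked example of the page-size derivation above (an illustrative sketch,
 * not code used by the driver): assume the platform supports 4K, 64K and 2M
 * pages and the backing store was built from a mix of 2M and 64K chunks,
 * i.e. page_sizes.phys == SZ_2M | SZ_64K. A supported bit i survives
 * whenever some physical chunk of at least 2^i bytes exists, so the loop
 * effectively sets
 *
 *	sg |= BIT(12);	(4K:  phys has chunks >= 4K)
 *	sg |= BIT(16);	(64K: phys has chunks >= 64K)
 *	sg |= BIT(21);	(2M:  phys has chunks >= 2M)
 *
 * yielding page_sizes.sg == SZ_4K | SZ_64K | SZ_2M, the sizes the GTT
 * insertion code may later pick from opportunistically.
 */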

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	assert_object_held_shared(obj);

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	assert_object_held(obj);

	assert_object_held_shared(obj);

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			return err;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

	return 0;
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}
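
/*
 * Caller-side sketch (hypothetical, not a helper defined in this file):
 * pins are reference counted, so a user typically brackets access to the
 * backing store as
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *	... access obj->mm.pages ...
 *	i915_gem_object_unpin_pages(obj);
 *
 * where the unlocked variant above hides the ww-lock/-EDEADLK backoff dance;
 * callers already inside a ww transaction take the object lock themselves
 * and call i915_gem_object_pin_pages() directly.
 */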

/* Immediately discard the backing storage */
int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	if (obj->ops->truncate)
		return obj->ops->truncate(obj);

	return 0;
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_gt *gt = to_gt(i915);

	if (!obj->mm.tlb)
		return;

	intel_gt_invalidate_tlb(gt, obj->mm.tlb);
	obj->mm.tlb = 0;
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	assert_object_held_shared(obj);

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	if (!i915_gem_object_has_self_managed_shrink_list(obj))
		i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	flush_tlb_invalidate(obj);

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	assert_object_held_shared(obj);

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from
	 * the gtt lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

	return 0;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTEs (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	GEM_BUG_ON(type != I915_MAP_WC);

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (!i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return ERR_PTR(-ENXIO);

	if (WARN_ON_ONCE(obj->flags & I915_BO_ALLOC_GPU_ONLY))
		return ERR_PTR(-EINVAL);

	assert_object_held(obj);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				return ERR_PTR(err);

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	/*
	 * For discrete, our CPU mappings need to be consistent in order to
	 * function correctly on !x86. When mapping things through TTM, we use
	 * the same rules to determine the caching type.
	 *
	 * The caching rules, starting from DG1:
	 *
	 * - If the object can be placed in device local-memory, then the
	 *   pages should be allocated and mapped as write-combined only.
	 *
	 * - Everything else is always allocated and mapped as write-back,
	 *   with the guarantee that everything is also coherent with the
	 *   GPU.
	 *
	 * Internal users of lmem are already expected to get this right, so no
	 * fudging needed there.
	 */
	if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
		if (type != I915_MAP_WC && !obj->mm.n_placements) {
			ptr = ERR_PTR(-ENODEV);
			goto err_unpin;
		}

		type = I915_MAP_WC;
	} else if (IS_DGFX(to_i915(obj->base.dev))) {
		type = I915_MAP_WB;
	}

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ptr = ERR_PTR(-EBUSY);
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		err = i915_gem_object_wait_moving_fence(obj, true);
		if (err) {
			ptr = ERR_PTR(err);
			goto err_unpin;
		}

		if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
			ptr = ERR_PTR(-ENODEV);
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (IS_ERR(ptr))
			goto err_unpin;

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
	return ptr;
}
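
/*
 * Usage sketch (hypothetical, not code in this file): a kernel-internal
 * user maps the object, writes through the returned pointer, flushes and
 * then drops the mapping pin:
 *
 *	u32 *cmd;
 *
 *	cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	cmd[0] = MI_BATCH_BUFFER_END;
 *	__i915_gem_object_flush_map(obj, 0, sizeof(*cmd));
 *	i915_gem_object_unpin_map(obj);
 *
 * Note that the requested mapping type is only a hint on discrete parts:
 * as described above, anything that can be placed in local memory is forced
 * to I915_MAP_WC.
 */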

void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}

enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
					  struct drm_i915_gem_object *obj,
					  bool always_coherent)
{
	if (i915_gem_object_is_lmem(obj))
		return I915_MAP_WC;
	if (HAS_LLC(i915) || always_coherent)
		return I915_MAP_WB;
	else
		return I915_MAP_WC;
}

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset,
			 bool dma)
{
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	if (!i915_gem_object_has_pinned_pages(obj))
		assert_object_held(obj);

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}
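
/*
 * Worked example of the lookup cache above (illustrative only): take an
 * object whose first sg entry spans pages [0..3] and whose second spans
 * pages [4..7]. A forward walk to n == 6 inserts 0 -> sg0 and
 * 1..3 -> value(0) into the radix tree and leaves sg_pos/sg_idx at the
 * second entry. A later backwards lookup of page 2 (2 < sg_idx) then goes
 * straight to the radix tree: it finds value(0), re-looks-up index 0 to get
 * sg0 and returns *offset == 2 without walking the scatterlist again.
 */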

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}
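
/*
 * Illustrative example for the helpers above (a sketch, not code in this
 * file): if page n lands two pages into a contiguous dma segment,
 * i915_gem_object_get_sg_dma() returns that segment with *offset == 2, so
 * i915_gem_object_get_dma_address_len() reports
 * sg_dma_address(sg) + 2 * PAGE_SIZE as the address and
 * sg_dma_len(sg) - 2 * PAGE_SIZE as the remaining length, i.e. the bytes
 * left in that segment from page n onwards.
 */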