/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gemfs.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	mapping_clear_unevictable(mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, st) {
		if (dirty)
			set_page_dirty(page);

		if (backup)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);

	sg_free_table(st);
}

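/*
 * Build a scatterlist of shmemfs-backed pages covering @size bytes of
 * @mapping, coalescing physically contiguous pages into sg entries of up
 * to @max_segment bytes. Pages are requested in stages: first without
 * direct reclaim (__GFP_NORETRY | __GFP_NOWARN), then again after running
 * our own shrinker, and finally with the mapping's full gfp mask plus
 * __GFP_RETRY_MAYFAIL so that a genuine failure is reported as ENOMEM
 * rather than invoking the OOM killer.
 */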
int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment)
{
	const unsigned long page_count = size / PAGE_SIZE;
	unsigned long i;
	struct scatterlist *sg;
	struct page *page;
	unsigned long last_pfn = 0; /* suppress gcc warning */
	gfp_t noreclaim;
	int ret;

	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (size > resource_size(&mr->region))
		return -ENOMEM;

	if (sg_alloc_table(st, page_count, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);

		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);

			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	return 0;
err_sg:
	sg_mark_end(sg);
	if (sg != st->sgl) {
		shmem_sg_free_table(st, mapping, false, false);
	} else {
		mapping_clear_unevictable(mapping);
		sg_free_table(st);
	}

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

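/*
 * Acquire the backing store for a shmem object: allocate the sg table,
 * make it visible to the device with i915_gem_gtt_prepare_pages() and
 * apply the bit-17 swizzle fixup where the platform needs it. If DMA
 * remapping fails with large segments, the table is rebuilt using
 * PAGE_SIZE segments before giving up.
 */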
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	struct address_space *mapping = obj->base.filp->f_mapping;
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

rebuild_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
				   max_segment);
	if (ret)
		goto err_st;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);
			kfree(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));

	return 0;

err_pages:
	shmem_sg_free_table(st, mapping, false, false);
	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
err_st:
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	kfree(st);

	return ret;
}

static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);

	return 0;
}

void __shmem_writeback(size_t size, struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */

	/* Begin writeback on each dirty page */
	for (i = 0; i < size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}

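/*
 * Common teardown run before the shmem backing store is released: drop the
 * dirty flag for objects marked DONTNEED, flush CPU caches if the pages may
 * still hold data the CPU cannot see coherently, and return the object to
 * the CPU write domain.
 */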
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
	/*
	 * On non-LLC platforms, force the flush-on-acquire if this is ever
	 * swapped-in. Our async flush path is not trustworthy enough yet (and
	 * happens in the wrong order), and with some tricks it's conceivable
	 * for userspace to change the cache-level to I915_CACHE_NONE after the
	 * pages are swapped-in, and since execbuf binds the object before doing
	 * the async flush, we have a race window.
	 */
	if (!HAS_LLC(i915))
		obj->cache_dirty = true;
}

void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
			    obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
	kfree(pages);
	obj->mm.dirty = false;
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.writeback = shmem_writeback,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}

static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(i915))
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));
	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	int err;

	err = i915_gemfs_init(mem->i915);
	if (err) {
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n",
			 err);
	}

	intel_memory_region_set_name(mem, "system");

	return 0; /* Don't error, we can simply fallback to the kernel mnt */
}

static int release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};

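/*
 * Register plain system memory (shmemfs) as an intel_memory_region. The
 * region is sized by totalram_pages(), which also bounds the largest single
 * object shmem_sg_alloc_table() will attempt to populate.
 */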
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}
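
/*
 * Illustrative usage only (not part of the driver): a caller wanting a
 * swappable system-memory object prepopulated with CPU data might do
 * something like the following, where blob and blob_size are placeholder
 * names:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_shmem_from_data(i915, blob, blob_size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 * The object starts in the CPU domain and its pages live in shmemfs, so
 * they remain eligible for writeback and swap until pinned for GPU use.
 */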