/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gemfs.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}
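
/*
 * Undo shmem_sg_alloc_table(): make the mapping evictable again, mark each
 * page dirty and/or accessed as requested, drop the page references via a
 * pagevec and free the sg_table itself.
 */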
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	mapping_clear_unevictable(mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, st) {
		if (dirty)
			set_page_dirty(page);

		if (backup)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);

	sg_free_table(st);
}
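
/*
 * Fill an sg_table with pages taken from the shmem mapping, coalescing
 * physically contiguous pages into scatterlist entries no larger than
 * max_segment. Allocation first avoids reclaim entirely, then retries after
 * shrinking our own bound/unbound objects, and finally falls back to the
 * mapping's full gfp mask (with __GFP_RETRY_MAYFAIL) before reporting -ENOMEM.
 */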
int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment)
{
	const unsigned long page_count = size / PAGE_SIZE;
	unsigned long i;
	struct scatterlist *sg;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t noreclaim;
	int ret;

	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (size > resource_size(&mr->region))
		return -ENOMEM;

	if (sg_alloc_table(st, page_count, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);

		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);

			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	return 0;
err_sg:
	sg_mark_end(sg);
	if (sg != st->sgl) {
		shmem_sg_free_table(st, mapping, false, false);
	} else {
		mapping_clear_unevictable(mapping);
		sg_free_table(st);
	}

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}
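
/*
 * ->get_pages() for shmem-backed objects: build the scatterlist, map it for
 * DMA (falling back to PAGE_SIZE segments if remapping cannot cope with
 * larger entries) and publish the pages on the object.
 */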
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	struct address_space *mapping = obj->base.filp->f_mapping;
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

rebuild_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
				   max_segment);
	if (ret)
		goto err_st;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);
			kfree(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));

	return 0;

err_pages:
	shmem_sg_free_table(st, mapping, false, false);
	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
err_st:
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	kfree(st);

	return ret;
}

static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmemfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);

	return 0;
}

void __shmem_writeback(size_t size, struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */

	/* Begin writeback on each dirty page */
	for (i = 0; i < size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}
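
/*
 * ->shrink() for shmem-backed objects: purgeable (DONTNEED) objects are
 * truncated immediately, already purged objects are left alone, and anything
 * else is optionally handed to writeback when the caller sets
 * I915_GEM_OBJECT_SHRINK_WRITEBACK.
 */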
static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		return i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return 0;
	}

	if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
		shmem_writeback(obj);

	return 0;
}

void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
	/*
	 * On non-LLC platforms, force the flush-on-acquire if this is ever
	 * swapped-in. Our async flush path is not trustworthy enough yet (and
	 * happens in the wrong order), and with some tricks it's conceivable
	 * for userspace to change the cache-level to I915_CACHE_NONE after the
	 * pages are swapped-in, and since execbuf binds the object before doing
	 * the async flush, we have a race window.
	 */
	if (!HAS_LLC(i915))
		obj->cache_dirty = true;
}
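
/*
 * Release the shmem backing store: flush CPU caches if needed, unmap the
 * pages from DMA, save bit-17 swizzle state where required and hand the
 * pages back to shmemfs, dirtying them if we wrote to them and marking them
 * accessed if the object is still WILLNEED.
 */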
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
			    obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
	kfree(pages);
	obj->mm.dirty = false;
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.shrink = shmem_shrink,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}
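
/*
 * Initialise a shmem-backed object in the system memory region: create the
 * backing shmemfs file, constrain its gfp mask (no highmem and DMA32-only
 * pages on 965G/965GM), and start the object life in the CPU domain with the
 * appropriate cache coherency (LLC where available).
 */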
static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(i915))
		/*
		 * On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));
	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	int err;

	err = i915_gemfs_init(mem->i915);
	if (err) {
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n",
			 err);
	}

	intel_memory_region_set_name(mem, "system");

	return 0; /* Don't error, we can simply fall back to the kernel mnt */
}

static int release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};

struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}