/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/swap.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gemfs.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

void shmem_free_st(struct sg_table *st, struct address_space *mapping,
		   bool dirty, bool backup)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	mapping_clear_unevictable(mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, st) {
		if (dirty)
			set_page_dirty(page);

		if (backup)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);

	sg_free_table(st);
	kfree(st);
}
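
/*
 * Allocate the shmem backing pages for @size bytes and gather them into a
 * freshly allocated sg_table, coalescing physically contiguous pages into
 * segments of up to @max_segment bytes. Allocation is first attempted
 * without reclaim, then retried after reaping our own buffers via the i915
 * shrinker, and finally handed to the VM with __GFP_RETRY_MAYFAIL so we
 * report ENOMEM rather than trigger the OOM killer.
 */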
struct sg_table *shmem_alloc_st(struct drm_i915_private *i915,
				size_t size, struct intel_memory_region *mr,
				struct address_space *mapping,
				unsigned int max_segment)
{
	const unsigned long page_count = size / PAGE_SIZE;
	unsigned long i;
	struct sg_table *st;
	struct scatterlist *sg;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t noreclaim;
	int ret;

	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (size > resource_size(&mr->region))
		return ERR_PTR(-ENOMEM);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);

		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);

			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	return st;
err_sg:
	sg_mark_end(sg);
	if (sg != st->sgl) {
		shmem_free_st(st, mapping, false, false);
	} else {
		mapping_clear_unevictable(mapping);
		sg_free_table(st);
		kfree(st);
	}

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ERR_PTR(ret);
}
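
/*
 * Acquire the backing store for @obj: build the sg_table from shmem pages,
 * map it for DMA and publish the pages to the object. If DMA remapping
 * fails with the default segment size, the table is rebuilt with PAGE_SIZE
 * segments before giving up.
 */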
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	struct address_space *mapping = obj->base.filp->f_mapping;
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

rebuild_st:
	st = shmem_alloc_st(i915, obj->base.size, mem, mapping, max_segment);
	if (IS_ERR(st)) {
		ret = PTR_ERR(st);
		goto err_st;
	}

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);
			kfree(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));

	return 0;

err_pages:
	shmem_free_st(st, mapping, false, false);
	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
err_st:
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as possible back
	 * to the system as we are called from OOM. To do this we must
	 * instruct the shmemfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);

	return 0;
}

void __shmem_writeback(size_t size, struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */

	/* Begin writeback on each dirty page */
	for (i = 0; i < size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}
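
/*
 * Return the object to the CPU write domain before its backing pages are
 * released: objects marked DONTNEED are no longer considered dirty, and the
 * pages are clflushed if they may still be stale in the CPU caches.
 */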
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
	/*
	 * On non-LLC platforms, force the flush-on-acquire if this is ever
	 * swapped-in. Our async flush path is not trustworthy enough yet (and
	 * happens in the wrong order), and with some tricks it's conceivable
	 * for userspace to change the cache-level to I915_CACHE_NONE after the
	 * pages are swapped-in, and since execbuf binds the object before doing
	 * the async flush, we have a race window.
	 */
	if (!HAS_LLC(i915))
		obj->cache_dirty = true;
}

void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	shmem_free_st(pages, file_inode(obj->base.filp)->i_mapping,
		      obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
	obj->mm.dirty = false;
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}
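
/*
 * Fast path for pwrite before the backing store is instantiated: write
 * straight into the shmemfs pagecache rather than pinning every page.
 * Returns -ENODEV once the object already has pages, leaving the write to
 * the regular pwrite paths.
 */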
static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.writeback = shmem_writeback,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}
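
/*
 * Initialise a shmem-backed GEM object: create the backing shmemfs file,
 * constrain the gfp mask for platforms that cannot relocate objects above
 * 4GiB, and start the object in the CPU domain with the appropriate cache
 * level.
 */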
static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(i915))
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));
	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	int err;

	err = i915_gemfs_init(mem->i915);
	if (err) {
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n",
			 err);
	}

	intel_memory_region_set_name(mem, "system");

	return 0; /* Don't error, we can simply fall back to the kernel mnt */
}

static void release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};
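
/*
 * Register system memory as an intel_memory_region, sized by the total
 * amount of RAM in the machine.
 */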
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}