/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/swap.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gemfs.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

static void shmem_free_st(struct sg_table *st, struct address_space *mapping,
			  bool dirty, bool backup)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	mapping_clear_unevictable(mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, st) {
		if (dirty)
			set_page_dirty(page);

		if (backup)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);

	sg_free_table(st);
	kfree(st);
}

static struct sg_table *shmem_alloc_st(struct drm_i915_private *i915,
				       size_t size, struct intel_memory_region *mr,
				       struct address_space *mapping,
				       unsigned int max_segment)
{
	const unsigned long page_count = size / PAGE_SIZE;
	unsigned long i;
	struct sg_table *st;
	struct scatterlist *sg;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t noreclaim;
	int ret;

	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (size > resource_size(&mr->region))
		return ERR_PTR(-ENOMEM);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our BOs are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);

		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);

			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	return st;
err_sg:
	sg_mark_end(sg);
	if (sg != st->sgl) {
		shmem_free_st(st, mapping, false, false);
	} else {
		mapping_clear_unevictable(mapping);
		sg_free_table(st);
		kfree(st);
	}

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient memory, along with
	 * the usual ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ERR_PTR(ret);
}

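/*
 * Populate the object's backing store: build a scatterlist of shmemfs
 * pages via shmem_alloc_st() and map it for DMA. If DMA remapping fails
 * for large segments, retry with PAGE_SIZE chunks (see rebuild_st).
 */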
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	struct address_space *mapping = obj->base.filp->f_mapping;
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

rebuild_st:
	st = shmem_alloc_st(i915, obj->base.size, mem, mapping, max_segment);
	if (IS_ERR(st)) {
		ret = PTR_ERR(st);
		goto err_st;
	}

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);
			kfree(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));

	return 0;

err_pages:
	shmem_free_st(st, mapping, false, false);
	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient memory, along with
	 * the usual ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
err_st:
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

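/*
 * Backing-store management hooks: truncate drops the shmemfs pages
 * outright (the object is treated as purged afterwards), while
 * writeback nudges dirty pages out towards swap. Both are typically
 * reached via the shrinker when the system is under memory pressure.
 */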
static void
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);
}

static void __shmem_writeback(size_t size, struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */

	/* Begin writeback on each dirty page */
	for (i = 0; i < size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}

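/*
 * Common teardown before pages are handed back to shmemfs: drop dirty
 * tracking for objects marked DONTNEED, clflush if the CPU caches may
 * still hold stale data, and return the object to the CPU write domain.
 */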
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
	/*
	 * On non-LLC platforms, force the flush-on-acquire if this is ever
	 * swapped-in. Our async flush path is not trustworthy enough yet (and
	 * happens in the wrong order), and with some tricks it's conceivable
	 * for userspace to change the cache-level to I915_CACHE_NONE after the
	 * pages are swapped-in, and since execbuf binds the object before doing
	 * the async flush, we have a race window.
	 */
	if (!HAS_LLC(i915))
		obj->cache_dirty = true;
}

void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	shmem_free_st(pages, file_inode(obj->base.filp)->i_mapping,
		      obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
	obj->mm.dirty = false;
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

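/*
 * There is no special pread fast path for shmem objects; returning
 * -ENODEV signals the caller to fall back to its ordinary page-based
 * read path. Only the phys-backed case is handled here.
 */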
static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.writeback = shmem_writeback,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

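/*
 * Back the GEM object with a shmemfs file. We prefer our private gemfs
 * mount when available (set up in init_shmem() below), as that mount is
 * configured to allow transparent huge pages for the backing store.
 */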
static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}

static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(i915))
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));
	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	int err;

	err = i915_gemfs_init(mem->i915);
	if (err) {
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n",
			 err);
	}

	intel_memory_region_set_name(mem, "system");

	return 0; /* Don't error, we can simply fall back to the kernel mnt */
}

static void release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};

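/*
 * Register shmem-backed system memory as an intel_memory_region,
 * nominally sized to the total amount of RAM in the system.
 */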
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}