/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/swap.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gemfs.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

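/*
 * Release the shmem pages referenced by the sg_table: make the mapping
 * evictable again, mark the pages dirty and/or accessed as requested,
 * drop our reference on each page (returning it to the proper LRU) and
 * free the table itself.
 */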
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	mapping_clear_unevictable(mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, st) {
		if (dirty)
			set_page_dirty(page);

		if (backup)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);

	sg_free_table(st);
}

int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment)
{
	const unsigned long page_count = size / PAGE_SIZE;
	unsigned long i;
	struct scatterlist *sg;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t noreclaim;
	int ret;

	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (size > resource_size(&mr->region))
		return -ENOMEM;

	if (sg_alloc_table(st, page_count, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);

		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);

			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	return 0;
err_sg:
	sg_mark_end(sg);
	if (sg != st->sgl) {
		shmem_sg_free_table(st, mapping, false, false);
	} else {
		mapping_clear_unevictable(mapping);
		sg_free_table(st);
	}

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

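/*
 * Acquire the shmemfs backing pages for the object and map them for DMA.
 * If DMA remapping fails with large segments, retry with the scatterlist
 * limited to PAGE_SIZE chunks before reporting an error.
 */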
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	struct address_space *mapping = obj->base.filp->f_mapping;
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

rebuild_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
				   max_segment);
	if (ret)
		goto err_st;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries; asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);
			kfree(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));

	return 0;

err_pages:
	shmem_sg_free_table(st, mapping, false, false);
	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
err_st:
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	kfree(st);

	return ret;
}

static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);

	return 0;
}

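/*
 * Start asynchronous writeback for every dirty, unmapped page in the
 * first @size bytes of @mapping so that reclaim can later drop the
 * clean copies.
 */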
void __shmem_writeback(size_t size, struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */

	/* Begin writeback on each dirty page */
	for (i = 0; i < size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}

void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
	/*
	 * On non-LLC platforms, force the flush-on-acquire if this is ever
	 * swapped-in. Our async flush path is not trustworthy enough yet (and
	 * happens in the wrong order), and with some tricks it's conceivable
	 * for userspace to change the cache-level to I915_CACHE_NONE after the
	 * pages are swapped-in, and since execbuf binds the object before doing
	 * the async flush, we have a race window.
	 */
	if (!HAS_LLC(i915))
		obj->cache_dirty = true;
}

void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
			    obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
	kfree(pages);
	obj->mm.dirty = false;
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}

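/*
 * pwrite fast path used before any backing pages have been pinned:
 * write straight into the shmemfs pagecache. Once the object already
 * has pages (or is no longer WILLNEED) we bail out and let the caller
 * fall back to the regular pwrite paths.
 */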
static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.writeback = shmem_writeback,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

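/*
 * Back the GEM object with a shmemfs file, preferring our private gemfs
 * mount (set up by i915_gemfs_init()) when it is available.
 */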
static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}

static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(i915))
		/*
		 * On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));
	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	int err;

	err = i915_gemfs_init(mem->i915);
	if (err) {
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n",
			 err);
	}

	intel_memory_region_set_name(mem, "system");

	return 0; /* Don't error, we can simply fall back to the kernel mnt */
}

static int release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};

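/*
 * Register plain system memory (shmem-backed) as the "system" memory
 * region, sized by the total amount of RAM in the machine.
 */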
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}