/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/swap.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gemfs.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}
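/*
 * shmem_get_pages() below pins the shmemfs backing store and builds the
 * object's sg_table. Allocation escalates in three steps: first without
 * reclaim (__GFP_NORETRY | __GFP_NOWARN), then again after asking our own
 * shrinker to release bound/unbound objects, and finally with the full
 * mapping gfp mask plus __GFP_RETRY_MAYFAIL so we report ENOMEM to
 * userspace instead of triggering the OOM killer. Physically contiguous
 * pages are coalesced into a single scatterlist entry up to max_segment;
 * e.g. pages with pfns 0x1000, 0x1001, 0x1002 and then 0x2000 become two
 * entries, one of 3 * PAGE_SIZE and one of PAGE_SIZE.
 */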
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned long i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0; /* suppress gcc warning */
	unsigned int max_segment = i915_sg_segment_size();
	unsigned int sg_page_sizes;
	gfp_t noreclaim;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (obj->base.size > resource_size(&mem->region))
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

rebuild_st:
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/*
	 * Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker.
	 */
	mapping = obj->base.filp->f_mapping;
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);

		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i) {
				sg_page_sizes |= sg->length;
				sg = sg_next(sg);
			}
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
	}
	if (sg) { /* loop terminated early; short sg table */
		sg_page_sizes |= sg->length;
		sg_mark_end(sg);
	}

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err_sg:
	sg_mark_end(sg);
err_pages:
	mapping_clear_unevictable(mapping);
	if (sg != st->sgl) {
		struct pagevec pvec;

		pagevec_init(&pvec);
		for_each_sgt_page(page, sgt_iter, st) {
			if (!pagevec_add(&pvec, page))
				check_release_pagevec(&pvec);
		}
		if (pagevec_count(&pvec))
			check_release_pagevec(&pvec);
	}
	sg_free_table(st);
	kfree(st);

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient memory, along with
	 * the usual ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

static void
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */
	mapping = obj->base.filp->f_mapping;

	/* Begin writeback on each dirty page */
	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}
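/*
 * Flush CPU-side state before the backing pages are released or written
 * back. If the object was marked DONTNEED its contents are discardable,
 * so the dirty flag is dropped; if the caller requests it, the pages are
 * clflushed unless the object is already in the CPU read domain or
 * coherent for CPU reads. Finally the object is moved to the CPU write
 * domain.
 */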
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
}

void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	GEM_WARN_ON(IS_DGFX(to_i915(obj->base.dev)));
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		if (obj->mm.madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}
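/*
 * Fast path for pwrite into an object that has no pages pinned yet: the
 * user data is written straight into the shmemfs page cache with
 * pagecache_write_begin()/pagecache_write_end(), one page per iteration,
 * so the backing store is prepopulated without instantiating and clearing
 * (or swapping in) every page first. Once the object has pages, or if the
 * atomic copy faults, an error is returned and the caller falls back to
 * the ordinary pwrite paths.
 */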
static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.writeback = shmem_writeback,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}
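/*
 * Initialise a GEM object backed by shmemfs. On 965G/965GM, which cannot
 * relocate objects above 4GiB, the mapping's gfp mask is restricted to
 * the low 32 bits of address space (__GFP_DMA32, no highmem). The object
 * starts in the CPU domain; with an LLC the default cache level is
 * I915_CACHE_LLC, otherwise I915_CACHE_NONE.
 */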
static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(i915))
		/*
		 * On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));
	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	int err;

	err = i915_gemfs_init(mem->i915);
	if (err) {
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n",
			 err);
	}

	intel_memory_region_set_name(mem, "system");

	return 0; /* Don't error, we can simply fall back to the kernel mnt */
}

static void release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};
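/*
 * Register system memory (shmem) as an intel_memory_region. The region
 * spans totalram_pages() worth of bytes, has a PAGE_SIZE minimum page
 * size, and uses shmem_region_ops, so objects created from it are
 * initialised by shmem_object_init() above.
 */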
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}