/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/swap.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
        check_move_unevictable_pages(pvec);
        __pagevec_release(pvec);
        cond_resched();
}

static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        const unsigned long page_count = obj->base.size / PAGE_SIZE;
        unsigned long i;
        struct address_space *mapping;
        struct sg_table *st;
        struct scatterlist *sg;
        struct sgt_iter sgt_iter;
        struct page *page;
        unsigned long last_pfn = 0; /* suppress gcc warning */
        unsigned int max_segment = i915_sg_segment_size();
        unsigned int sg_page_sizes;
        struct pagevec pvec;
        gfp_t noreclaim;
        int ret;

        /*
         * Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU cache.
         */
        GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
        GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

        /*
         * If there's no chance of allocating enough pages for the whole
         * object, bail early.
         */
        if (page_count > totalram_pages())
                return -ENOMEM;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;

rebuild_st:
        if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
                kfree(st);
                return -ENOMEM;
        }

        /*
         * Get the list of pages out of our struct file. They'll be pinned
         * at this point until we release them.
         *
         * Fail silently without starting the shrinker.
         */
        mapping = obj->base.filp->f_mapping;
        mapping_set_unevictable(mapping);
        noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
        noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

        sg = st->sgl;
        st->nents = 0;
        sg_page_sizes = 0;
        for (i = 0; i < page_count; i++) {
                const unsigned int shrink[] = {
                        (I915_SHRINK_BOUND |
                         I915_SHRINK_UNBOUND |
                         I915_SHRINK_PURGEABLE),
                        0,
                }, *s = shrink;
                gfp_t gfp = noreclaim;

                do {
                        cond_resched();
                        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
                        if (!IS_ERR(page))
                                break;

                        if (!*s) {
                                ret = PTR_ERR(page);
                                goto err_sg;
                        }

                        i915_gem_shrink(i915, 2 * page_count, NULL, *s++);

                        /*
                         * We've tried hard to allocate the memory by reaping
                         * our own buffer, now let the real VM do its job and
                         * go down in flames if truly OOM.
                         *
                         * However, since graphics tend to be disposable,
                         * defer the oom here by reporting the ENOMEM back
                         * to userspace.
                         */
                        if (!*s) {
                                /* reclaim and warn, but no oom */
                                gfp = mapping_gfp_mask(mapping);

                                /*
                                 * Our bo are always dirty and so we require
                                 * kswapd to reclaim our pages (direct reclaim
                                 * does not effectively begin pageout of our
                                 * buffers on its own). However, direct reclaim
                                 * only waits for kswapd when under allocation
                                 * congestion. So as a result __GFP_RECLAIM is
                                 * unreliable and fails to actually reclaim our
                                 * dirty pages -- unless you try over and over
                                 * again with !__GFP_NORETRY. However, we still
                                 * want to fail this allocation rather than
                                 * trigger the out-of-memory killer and for
                                 * this we want __GFP_RETRY_MAYFAIL.
                                 */
                                gfp |= __GFP_RETRY_MAYFAIL;
                        }
                } while (1);

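                /*
                 * Append the page to the scatterlist: coalesce it into the
                 * previous entry if the pages are physically contiguous and
                 * the entry has not yet grown past max_segment, otherwise
                 * start a new entry.
                 */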
                if (!i ||
                    sg->length >= max_segment ||
                    page_to_pfn(page) != last_pfn + 1) {
                        if (i) {
                                sg_page_sizes |= sg->length;
                                sg = sg_next(sg);
                        }
                        st->nents++;
                        sg_set_page(sg, page, PAGE_SIZE, 0);
                } else {
                        sg->length += PAGE_SIZE;
                }
                last_pfn = page_to_pfn(page);

                /* Check that the i965g/gm workaround works. */
                WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
        }
        if (sg) { /* loop terminated early; short sg table */
                sg_page_sizes |= sg->length;
                sg_mark_end(sg);
        }

        /* Trim unused sg entries to avoid wasting memory. */
        i915_sg_trim(st);

        ret = i915_gem_gtt_prepare_pages(obj, st);
        if (ret) {
                /*
                 * DMA remapping failed? One possible cause is that
                 * it could not reserve enough large entries; asking
                 * for PAGE_SIZE chunks instead may be helpful.
                 */
                if (max_segment > PAGE_SIZE) {
                        for_each_sgt_page(page, sgt_iter, st)
                                put_page(page);
                        sg_free_table(st);

                        max_segment = PAGE_SIZE;
                        goto rebuild_st;
                } else {
                        dev_warn(&i915->drm.pdev->dev,
                                 "Failed to DMA remap %lu pages\n",
                                 page_count);
                        goto err_pages;
                }
        }

        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj, st);

        __i915_gem_object_set_pages(obj, st, sg_page_sizes);

        return 0;

err_sg:
        sg_mark_end(sg);
err_pages:
        mapping_clear_unevictable(mapping);
        pagevec_init(&pvec);
        for_each_sgt_page(page, sgt_iter, st) {
                if (!pagevec_add(&pvec, page))
                        check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                check_release_pagevec(&pvec);
        sg_free_table(st);
        kfree(st);

        /*
         * shmemfs first checks if there is enough memory to allocate the page
         * and reports ENOSPC should there be insufficient, along with the usual
         * ENOMEM for a genuine allocation failure.
         *
         * We use ENOSPC in our driver to mean that we have run out of aperture
         * space and so want to translate the error from shmemfs back to our
         * usual understanding of ENOMEM.
         */
        if (ret == -ENOSPC)
                ret = -ENOMEM;

        return ret;
}

static void
shmem_truncate(struct drm_i915_gem_object *obj)
{
        /*
         * Our goal here is to return as much of the memory as
         * is possible back to the system as we are called from OOM.
         * To do this we must instruct the shmfs to drop all of its
         * backing pages, *now*.
         */
        shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
        obj->mm.madv = __I915_MADV_PURGED;
        obj->mm.pages = ERR_PTR(-EFAULT);
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
        struct address_space *mapping;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = SWAP_CLUSTER_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .for_reclaim = 1,
        };
        unsigned long i;

        /*
         * Leave mmappings intact (GTT will have been revoked on unbinding,
         * leaving only CPU mmappings around) and add those pages to the LRU
         * instead of invoking writeback so they are aged and paged out
         * as normal.
         */
        mapping = obj->base.filp->f_mapping;

        /* Begin writeback on each dirty page */
        for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
                struct page *page;

                page = find_lock_entry(mapping, i);
                if (!page || xa_is_value(page))
                        continue;

                if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
                        int ret;

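                        /*
                         * Mark the page as being written back for reclaim so
                         * that the VM can free it as soon as the writeback
                         * completes (the same PG_reclaim pattern used by
                         * pageout() in mm/vmscan.c).
                         */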
                        SetPageReclaim(page);
                        ret = mapping->a_ops->writepage(page, &wbc);
                        if (!PageWriteback(page))
                                ClearPageReclaim(page);
                        if (!ret)
                                goto put;
                }
                unlock_page(page);
put:
                put_page(page);
        }
}

void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                struct sg_table *pages,
                                bool needs_clflush)
{
        GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

        if (obj->mm.madv == I915_MADV_DONTNEED)
                obj->mm.dirty = false;

        if (needs_clflush &&
            (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
            !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
                drm_clflush_sg(pages);

        __start_cpu_write(obj);
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
        struct sgt_iter sgt_iter;
        struct pagevec pvec;
        struct page *page;

        __i915_gem_object_release_shmem(obj, pages, true);

        i915_gem_gtt_finish_pages(obj, pages);

        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_save_bit_17_swizzle(obj, pages);

        mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);

        pagevec_init(&pvec);
        for_each_sgt_page(page, sgt_iter, pages) {
                if (obj->mm.dirty)
                        set_page_dirty(page);

                if (obj->mm.madv == I915_MADV_WILLNEED)
                        mark_page_accessed(page);

                if (!pagevec_add(&pvec, page))
                        check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                check_release_pagevec(&pvec);
        obj->mm.dirty = false;

        sg_free_table(pages);
        kfree(pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
             const struct drm_i915_gem_pwrite *arg)
{
        struct address_space *mapping = obj->base.filp->f_mapping;
        char __user *user_data = u64_to_user_ptr(arg->data_ptr);
        u64 remain, offset;
        unsigned int pg;

        /* Caller already validated user args */
        GEM_BUG_ON(!access_ok(user_data, arg->size));

        /*
         * Before we instantiate/pin the backing store for our use, we
         * can prepopulate the shmemfs filp efficiently using a write into
         * the pagecache. We avoid the penalty of instantiating all the
         * pages, important if the user is just writing to a few and never
         * uses the object on the GPU, and using a direct write into shmemfs
         * allows it to avoid the cost of retrieving a page (either swapin
         * or clearing-before-use) before it is overwritten.
         */
        if (i915_gem_object_has_pages(obj))
                return -ENODEV;

        if (obj->mm.madv != I915_MADV_WILLNEED)
                return -EFAULT;

        /*
         * Before the pages are instantiated the object is treated as being
         * in the CPU domain. The pages will be clflushed as required before
         * use, and we can freely write into the pages directly. If userspace
         * races pwrite with any other operation, corruption will ensue -
         * that is userspace's prerogative!
         */

        remain = arg->size;
        offset = arg->offset;
        pg = offset_in_page(offset);

        do {
                unsigned int len, unwritten;
                struct page *page;
                void *data, *vaddr;
                int err;
                char c;

                len = PAGE_SIZE - pg;
                if (len > remain)
                        len = remain;

                /* Prefault the user page to reduce potential recursion */
                err = __get_user(c, user_data);
                if (err)
                        return err;

                err = __get_user(c, user_data + len - 1);
                if (err)
                        return err;

                err = pagecache_write_begin(obj->base.filp, mapping,
                                            offset, len, 0,
                                            &page, &data);
                if (err < 0)
                        return err;

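                /*
                 * kmap_atomic() puts us in an atomic context, so the copy
                 * below must not fault on the user address; the prefault
                 * above makes that unlikely, and any bytes left unwritten
                 * are reported back as an error further down.
                 */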
                vaddr = kmap_atomic(page);
                unwritten = __copy_from_user_inatomic(vaddr + pg,
                                                      user_data,
                                                      len);
                kunmap_atomic(vaddr);

                err = pagecache_write_end(obj->base.filp, mapping,
                                          offset, len, len - unwritten,
                                          page, data);
                if (err < 0)
                        return err;

                /* We don't handle -EFAULT, leave it to the caller to check */
                if (unwritten)
                        return -ENODEV;

                remain -= len;
                user_data += len;
                offset += len;
                pg = 0;
        } while (remain);

        return 0;
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
                 I915_GEM_OBJECT_IS_SHRINKABLE,

        .get_pages = shmem_get_pages,
        .put_pages = shmem_put_pages,
        .truncate = shmem_truncate,
        .writeback = shmem_writeback,

        .pwrite = shmem_pwrite,
};

static int create_shmem(struct drm_i915_private *i915,
                        struct drm_gem_object *obj,
                        size_t size)
{
        unsigned long flags = VM_NORESERVE;
        struct file *filp;

        drm_gem_private_object_init(&i915->drm, obj, size);

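        /*
         * Prefer the driver's private gemfs mount when it was set up (it is
         * mounted with transparent hugepage support where available),
         * falling back to the system default shmemfs mount otherwise.
         */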
        if (i915->mm.gemfs)
                filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
                                                 flags);
        else
                filp = shmem_file_setup("i915", size, flags);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;
        return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
{
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
        unsigned int cache_level;
        gfp_t mask;
        int ret;

        /* There is a prevalence of the assumption that we fit the object's
         * page count inside a 32bit _signed_ variable. Let's document this and
         * catch if we ever need to fix it. In the meantime, if you do spot
         * such a local variable, please consider fixing!
         */
        if (size >> PAGE_SHIFT > INT_MAX)
                return ERR_PTR(-E2BIG);

        if (overflows_type(size, obj->base.size))
                return ERR_PTR(-E2BIG);

        obj = i915_gem_object_alloc();
        if (!obj)
                return ERR_PTR(-ENOMEM);

        ret = create_shmem(i915, &obj->base, size);
        if (ret)
                goto fail;

        mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
        if (IS_I965GM(i915) || IS_I965G(i915)) {
                /* 965gm cannot relocate objects above 4GiB. */
                mask &= ~__GFP_HIGHMEM;
                mask |= __GFP_DMA32;
        }

        mapping = obj->base.filp->f_mapping;
        mapping_set_gfp_mask(mapping, mask);
        GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

        i915_gem_object_init(obj, &i915_gem_shmem_ops);

        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;

        if (HAS_LLC(i915))
                /* On some devices, we can have the GPU use the LLC (the CPU
                 * cache) for about a 10% performance improvement
                 * compared to uncached. Graphics requests other than
                 * display scanout are coherent with the CPU in
                 * accessing this cache. This means in this mode we
                 * don't need to clflush on the CPU side, and on the
                 * GPU side we only need to flush internal caches to
                 * get data visible to the CPU.
                 *
                 * However, we maintain the display planes as UC, and so
                 * need to rebind when first used as such.
                 */
                cache_level = I915_CACHE_LLC;
        else
                cache_level = I915_CACHE_NONE;

        i915_gem_object_set_cache_coherency(obj, cache_level);

        trace_i915_gem_object_create(obj);

        return obj;

fail:
        i915_gem_object_free(obj);
        return ERR_PTR(ret);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
                                       const void *data, size_t size)
{
        struct drm_i915_gem_object *obj;
        struct file *file;
        size_t offset;
        int err;

        obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
        if (IS_ERR(obj))
                return obj;

        GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

        file = obj->base.filp;
        offset = 0;
        do {
                unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
                struct page *page;
                void *pgdata, *vaddr;

                err = pagecache_write_begin(file, file->f_mapping,
                                            offset, len, 0,
                                            &page, &pgdata);
                if (err < 0)
                        goto fail;

                vaddr = kmap(page);
                memcpy(vaddr, data, len);
                kunmap(page);

                err = pagecache_write_end(file, file->f_mapping,
                                          offset, len, len,
                                          page, pgdata);
                if (err < 0)
                        goto fail;

                size -= len;
                data += len;
                offset += len;
        } while (size);

        return obj;

fail:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}
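
/*
 * Example usage (an illustrative sketch, not part of this file): a caller
 * such as firmware loading can wrap a blob of data in a shmem-backed object
 * and drop its reference once done with it:
 *
 *	obj = i915_gem_object_create_shmem_from_data(i915, blob, blob_size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	... use obj ...
 *	i915_gem_object_put(obj);
 */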