/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_tiling.h"
#include "i915_gemfs.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	mapping_clear_unevictable(mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, st) {
		if (dirty)
			set_page_dirty(page);

		if (backup)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);

	sg_free_table(st);
}
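
/*
 * Build an sg_table for @size bytes of shmem-backed memory, pulling pages
 * from @mapping in segments of at most @max_segment bytes. Physically
 * contiguous pages are coalesced into a single scatterlist entry. Under
 * memory pressure we first reap our own buffers via i915_gem_shrink()
 * before retrying with the full gfp mask of the mapping.
 *
 * Returns 0 on success or a negative error code.
 */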
int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment)
{
	unsigned int page_count; /* restricted by sg_alloc_table */
	unsigned long i;
	struct scatterlist *sg;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t noreclaim;
	int ret;

	if (overflows_type(size / PAGE_SIZE, page_count))
		return -E2BIG;

	page_count = size / PAGE_SIZE;
	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (size > resource_size(&mr->region))
		return -ENOMEM;

	if (sg_alloc_table(st, page_count, GFP_KERNEL | __GFP_NOWARN))
		return -ENOMEM;

	/*
	 * Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
			}
		} while (1);

		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);

			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	return 0;
err_sg:
	sg_mark_end(sg);
	if (sg != st->sgl) {
		shmem_sg_free_table(st, mapping, false, false);
	} else {
		mapping_clear_unevictable(mapping);
		sg_free_table(st);
	}

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}
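
/*
 * get_pages backend for shmem objects: allocate the backing store with
 * shmem_sg_alloc_table() and make it visible to the GPU via
 * i915_gem_gtt_prepare_pages(). If DMA remapping fails with large segments,
 * retry with PAGE_SIZE segments before giving up.
 */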
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	struct address_space *mapping = obj->base.filp->f_mapping;
	unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
	struct sg_table *st;
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

rebuild_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL | __GFP_NOWARN);
	if (!st)
		return -ENOMEM;

	ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
				   max_segment);
	if (ret)
		goto err_st;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);
			kfree(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %zu pages\n",
				 obj->base.size >> PAGE_SHIFT);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st);

	return 0;

err_pages:
	shmem_sg_free_table(st, mapping, false, false);
	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
err_st:
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	kfree(st);

	return ret;
}
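
/*
 * truncate backend: immediately drop all shmem backing pages and mark the
 * object as purged; obj->mm.pages is left poisoned so any further use of
 * the discarded backing store fails.
 */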
static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);

	return 0;
}
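
/*
 * Kick off writeback for up to @size bytes of dirty, unmapped pages in
 * @mapping. WB_SYNC_NONE means we only start the I/O and do not wait for
 * it to complete.
 */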
void __shmem_writeback(size_t size, struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */

	/* Begin writeback on each dirty page */
	for (i = 0; i < size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}

static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		return i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return 0;
	}

	if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
		shmem_writeback(obj);

	return 0;
}
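
/*
 * Common teardown before the backing pages of a shmem object are released:
 * clear the dirty flag for objects marked DONTNEED, clflush when the pages
 * are not coherent for CPU reads, and move the object back to the CPU
 * write domain.
 */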
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
	/*
	 * On non-LLC igfx platforms, force the flush-on-acquire if this is ever
	 * swapped-in. Our async flush path is not trustworthy enough yet (and
	 * happens in the wrong order), and with some tricks it's conceivable
	 * for userspace to change the cache-level to I915_CACHE_NONE after the
	 * pages are swapped-in, and since execbuf binds the object before doing
	 * the async flush, we have a race window.
	 */
	if (!HAS_LLC(i915) && !IS_DGFX(i915))
		obj->cache_dirty = true;
}

void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
			    obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
	kfree(pages);
	obj->mm.dirty = false;
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}
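
/*
 * pwrite backend: if the backing store has not been instantiated yet,
 * prepopulate the shmemfs file directly through the pagecache instead of
 * pinning all of its pages first (see the comment in the body below).
 */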
static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	const struct address_space_operations *aops = mapping->a_ops;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char __maybe_unused c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = aops->write_begin(obj->base.filp, mapping, offset, len,
					&page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = aops->write_end(obj->base.filp, mapping, offset, len,
				      len - unwritten, page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}
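
/* GEM object ops for shmem (system memory) backed objects. */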
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.shrink = shmem_shrink,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	/* XXX: The __shmem_file_setup() function returns -EINVAL if size is
	 * greater than MAX_LFS_FILESIZE.
	 * To report the same error as other code that returns -E2BIG when
	 * the size is too large, we return -E2BIG when the size is larger
	 * than the size that can be handled.
	 * If BITS_PER_LONG is 32, size > MAX_LFS_FILESIZE is always false,
	 * so we only need to check when BITS_PER_LONG is 64.
	 * If BITS_PER_LONG is 32, the E2BIG check is done when
	 * i915_gem_object_size_2big() is called, before the init_object()
	 * callback is called.
	 */
	if (BITS_PER_LONG == 64 && size > MAX_LFS_FILESIZE)
		return -E2BIG;

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}
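
/*
 * init_object hook for the system memory region: set up the backing shmemfs
 * file, constrain the gfp mask on i965g/gm (no allocations above 4GiB),
 * pick the default cache level and attach the object to the region.
 */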
static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t offset,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	/*
	 * MTL doesn't snoop CPU cache by default for GPU access (namely
	 * 1-way coherency). However, some UMDs are currently depending on
	 * that. Make 1-way coherency the default setting for MTL. A follow-up
	 * patch will extend the GEM_CREATE uAPI to allow UMDs to specify the
	 * caching mode at BO creation time.
	 */
	if (HAS_LLC(i915) || (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)))
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}
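
/* Create a shmem-backed GEM object in the generic system memory region. */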
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	const struct address_space_operations *aops;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));
	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	aops = file->f_mapping->a_ops;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = aops->write_begin(file, file->f_mapping, offset, len,
					&page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = aops->write_end(file, file->f_mapping, offset, len, len,
				      page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_init(mem->i915);
	intel_memory_region_set_name(mem, "system");

	return 0; /* We fall back to the kernel mnt if gemfs init failed. */
}

static int release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};
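
/*
 * Create the "system" memory region backed by shmem, nominally sized by the
 * total amount of system RAM.
 */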
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}