/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/swap.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gemfs.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

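/*
 * Allocate the shmemfs backing store for the object and build its sg_table,
 * coalescing physically contiguous pages into scatterlist entries no larger
 * than the DMA max segment size.
 */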
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned long i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	unsigned int max_segment = i915_sg_segment_size();
	unsigned int sg_page_sizes;
	gfp_t noreclaim;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (obj->base.size > resource_size(&mem->region))
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

rebuild_st:
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/*
	 * Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = obj->base.filp->f_mapping;
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);

		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i) {
				sg_page_sizes |= sg->length;
				sg = sg_next(sg);
			}
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
	}
	if (sg) { /* loop terminated early; short sg table */
		sg_page_sizes |= sg->length;
		sg_mark_end(sg);
	}

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	/*
	 * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
	 * possible for userspace to bypass the GTT caching bits set by the
	 * kernel, as per the given object cache_level. This is troublesome
	 * since the heavy flush we apply when first gathering the pages is
	 * skipped if the kernel thinks the object is coherent with the GPU. As
	 * a result it might be possible to bypass the cache and read the
	 * contents of the page directly, which could be stale data. If it's
	 * just a case of userspace shooting themselves in the foot then so be
	 * it, but since i915 takes the stance of always zeroing memory before
	 * handing it to userspace, we need to prevent this.
	 *
	 * By setting cache_dirty here we make the clflush in set_pages
	 * unconditional on such platforms.
	 */
	if (IS_JSL_EHL(i915) && obj->flags & I915_BO_ALLOC_USER)
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err_sg:
	sg_mark_end(sg);
err_pages:
	mapping_clear_unevictable(mapping);
	if (sg != st->sgl) {
		struct pagevec pvec;

		pagevec_init(&pvec);
		for_each_sgt_page(page, sgt_iter, st) {
			if (!pagevec_add(&pvec, page))
				check_release_pagevec(&pvec);
		}
		if (pagevec_count(&pvec))
			check_release_pagevec(&pvec);
	}
	sg_free_table(st);
	kfree(st);

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

static void
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */
	mapping = obj->base.filp->f_mapping;

	/* Begin writeback on each dirty page */
	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
}

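/*
 * Release the shmemfs backing store: mark pages dirty and accessed according
 * to the object's madvise state, return them to the page cache LRU and free
 * the sg_table.
 */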
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	GEM_WARN_ON(IS_DGFX(to_i915(obj->base.dev)));
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		if (obj->mm.madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}

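/*
 * The pwrite() backend: only usable before the backing store is instantiated,
 * in which case the data is written directly into the shmemfs page cache (or
 * handed off to the phys backend for objects without struct pages).
 */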
static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

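/*
 * Final object release: drop the memory region link, if any, and release the
 * backing shmemfs file.
 */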
static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.writeback = shmem_writeback,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}

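/*
 * Initialise a shmemfs-backed object: create the backing file, constrain its
 * gfp mask to respect platform DMA limits, and pick the initial cache level.
 */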
static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(i915))
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

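/* Create a shmem-backed object in the generic system memory region. */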
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));
	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	int err;

	err = i915_gemfs_init(mem->i915);
	if (err) {
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n",
			 err);
	}

	intel_memory_region_set_name(mem, "system");

	return 0; /* Don't error, we can simply fallback to the kernel mnt */
}

static void release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};

struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}