/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_tiling.h"
#include "i915_gemfs.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move folios to appropriate lru and release the batch, decrementing the
 * ref count of those folios.
 */
static void check_release_folio_batch(struct folio_batch *fbatch)
{
	check_move_unevictable_folios(fbatch);
	__folio_batch_release(fbatch);
	cond_resched();
}

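/*
 * Release the shmem folios referenced by @st: mark each folio dirty and/or
 * accessed as requested, drop the folio references in batches, and free the
 * scatterlist itself. A large folio spanning several sg entries is only
 * released once.
 */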
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup)
{
	struct sgt_iter sgt_iter;
	struct folio_batch fbatch;
	struct folio *last = NULL;
	struct page *page;

	mapping_clear_unevictable(mapping);

	folio_batch_init(&fbatch);
	for_each_sgt_page(page, sgt_iter, st) {
		struct folio *folio = page_folio(page);

		if (folio == last)
			continue;
		last = folio;
		if (dirty)
			folio_mark_dirty(folio);
		if (backup)
			folio_mark_accessed(folio);

		if (!folio_batch_add(&fbatch, folio))
			check_release_folio_batch(&fbatch);
	}
	if (fbatch.nr)
		check_release_folio_batch(&fbatch);

	sg_free_table(st);
}

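/*
 * Populate @st with folios allocated from the shmem @mapping, coalescing
 * physically contiguous runs into single sg entries up to @max_segment.
 * On allocation failure the i915 shrinker is invoked before retrying, and
 * as a last resort the allocation is retried with the mapping's full gfp
 * mask, failing with -ENOMEM rather than triggering the OOM killer.
 */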
int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment)
{
	unsigned int page_count; /* restricted by sg_alloc_table */
	unsigned long i;
	struct scatterlist *sg;
	unsigned long next_pfn = 0;	/* suppress gcc warning */
	gfp_t noreclaim;
	int ret;

	if (overflows_type(size / PAGE_SIZE, page_count))
		return -E2BIG;

	page_count = size / PAGE_SIZE;
	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (size > resource_size(&mr->region))
		return -ENOMEM;

	if (sg_alloc_table(st, page_count, GFP_KERNEL | __GFP_NOWARN))
		return -ENOMEM;

	/*
	 * Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		struct folio *folio;
		unsigned long nr_pages;
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			folio = shmem_read_folio_gfp(mapping, i, gfp);
			if (!IS_ERR(folio))
				break;

			if (!*s) {
				ret = PTR_ERR(folio);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
			}
		} while (1);

		nr_pages = min_t(unsigned long,
				 folio_nr_pages(folio), page_count - i);
		if (!i ||
		    sg->length >= max_segment ||
		    folio_pfn(folio) != next_pfn) {
			if (i)
				sg = sg_next(sg);

			st->nents++;
			sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0);
		} else {
			/* XXX: could overflow? */
			sg->length += nr_pages * PAGE_SIZE;
		}
		next_pfn = folio_pfn(folio) + nr_pages;
		i += nr_pages - 1;

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL);
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	return 0;
err_sg:
	sg_mark_end(sg);
	if (sg != st->sgl) {
		shmem_sg_free_table(st, mapping, false, false);
	} else {
		mapping_clear_unevictable(mapping);
		sg_free_table(st);
	}

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

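/*
 * Acquire the backing store for @obj: build the sg_table from shmem and
 * prepare it for DMA. If DMA remapping fails with large segments, the
 * table is rebuilt with PAGE_SIZE segments before giving up.
 */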
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	struct address_space *mapping = obj->base.filp->f_mapping;
	unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
	struct sg_table *st;
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

rebuild_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL | __GFP_NOWARN);
	if (!st)
		return -ENOMEM;

	ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
				   max_segment);
	if (ret)
		goto err_st;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);
			kfree(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %zu pages\n",
				 obj->base.size >> PAGE_SHIFT);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st);

	return 0;

err_pages:
	shmem_sg_free_table(st, mapping, false, false);
	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
err_st:
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	kfree(st);

	return ret;
}

static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);

	return 0;
}

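/*
 * Start asynchronous writeback of each dirty, unmapped page in the first
 * @size bytes of @mapping, so the pages can later be reclaimed without
 * the reclaimer having to wait on pageout itself.
 */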
void __shmem_writeback(size_t size, struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */

	/* Begin writeback on each dirty page */
	for (i = 0; i < size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}

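/*
 * Shrinker callback: purge the object outright if userspace marked it
 * DONTNEED, otherwise optionally kick writeback so its pages become
 * cheap for the VM to reclaim.
 */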
static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		return i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return 0;
	}

	if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
		shmem_writeback(obj);

	return 0;
}

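/*
 * Common teardown before the shmem pages are released: discard the dirty
 * state of DONTNEED objects, flush the CPU cache where coherency demands
 * it, and return the object to the CPU write domain.
 */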
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
	/*
	 * On non-LLC igfx platforms, force the flush-on-acquire if this is ever
	 * swapped-in. Our async flush path is not trustworthy enough yet (and
	 * happens in the wrong order), and with some tricks it's conceivable
	 * for userspace to change the cache-level to I915_CACHE_NONE after the
	 * pages are swapped-in, and since execbuf binds the object before doing
	 * the async flush, we have a race window.
	 */
	if (!HAS_LLC(i915) && !IS_DGFX(i915))
		obj->cache_dirty = true;
}

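/*
 * Drop the shmem backing store: unmap from DMA, preserve bit-17 swizzle
 * state where needed, then return the folios to shmemfs, marked dirty if
 * the object was written and recently used if it is WILLNEED.
 */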
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
			    obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
	kfree(pages);
	obj->mm.dirty = false;
}

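/* Dispatch to the shmem or phys backend, depending on the current backing. */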
static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	const struct address_space_operations *aops = mapping->a_ops;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char __maybe_unused c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = aops->write_begin(obj->base.filp, mapping, offset, len,
					&page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = aops->write_end(obj->base.filp, mapping, offset, len,
				      len - unwritten, page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

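/*
 * Only the phys backing provides a fast pread; returning -ENODEV tells
 * the caller to fall back to the generic pread path.
 */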
static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

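/* Final release: drop the memory-region link and the shmemfs file. */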
static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.shrink = shmem_shrink,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

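/*
 * Back a GEM object with a shmemfs file, preferring the driver's private
 * gemfs mount when one was set up for this device.
 */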
static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	/* XXX: The __shmem_file_setup() function returns -EINVAL if size is
	 * greater than MAX_LFS_FILESIZE.
	 * To report the same error as other code that returns -E2BIG when
	 * the size is too large, we add a check that returns -E2BIG when
	 * the size is larger than the size that can be handled.
	 * If BITS_PER_LONG is 32, size > MAX_LFS_FILESIZE is always false,
	 * so we only need to check when BITS_PER_LONG is 64.
	 * If BITS_PER_LONG is 32, the E2BIG check is done when
	 * i915_gem_object_size_2big() is called, before the init_object()
	 * callback is called.
	 */
	if (BITS_PER_LONG == 64 && size > MAX_LFS_FILESIZE)
		return -E2BIG;

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}

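/*
 * Initialise a shmem-backed GEM object: create the backing file, pick the
 * page allocation mask (32-bit DMA addresses on i965g/gm) and the default
 * cache level, and attach the object to its memory region.
 */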
static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t offset,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	/*
	 * MTL doesn't snoop CPU cache by default for GPU access (namely
	 * 1-way coherency). However some UMDs currently depend on that.
	 * Make 1-way coherency the default setting for MTL. A follow-up
	 * patch will extend the GEM_CREATE uAPI to allow UMDs to specify
	 * the caching mode at BO creation time.
	 */
	if (HAS_LLC(i915) || (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)))
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	const struct address_space_operations *aops;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));
	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	aops = file->f_mapping->a_ops;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = aops->write_begin(file, file->f_mapping, offset, len,
					&page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = aops->write_end(file, file->f_mapping, offset, len, len,
				      page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_init(mem->i915);
	intel_memory_region_set_name(mem, "system");

	return 0; /* We fall back to the kernel mnt if gemfs init failed. */
}

static int release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};

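/*
 * Register system memory (sized by totalram_pages()) as an intel memory
 * region, so shmem objects can be allocated through the region interface.
 */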
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}