/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_tiling.h"
#include "i915_gemfs.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move folios to appropriate lru and release the batch, decrementing the
 * ref count of those folios.
 */
static void check_release_folio_batch(struct folio_batch *fbatch)
{
	check_move_unevictable_folios(fbatch);
	__folio_batch_release(fbatch);
	cond_resched();
}

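/*
 * Undo shmem_sg_alloc_table: clear the unevictable flag on the mapping,
 * optionally mark each backing folio dirty and/or accessed, drop the
 * references taken when the folios were read in, and free the sg_table.
 */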
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup)
{
	struct sgt_iter sgt_iter;
	struct folio_batch fbatch;
	struct folio *last = NULL;
	struct page *page;

	mapping_clear_unevictable(mapping);

	folio_batch_init(&fbatch);
	for_each_sgt_page(page, sgt_iter, st) {
		struct folio *folio = page_folio(page);

		if (folio == last)
			continue;
		last = folio;
		if (dirty)
			folio_mark_dirty(folio);
		if (backup)
			folio_mark_accessed(folio);

		if (!folio_batch_add(&fbatch, folio))
			check_release_folio_batch(&fbatch);
	}
	if (fbatch.nr)
		check_release_folio_batch(&fbatch);

	sg_free_table(st);
}

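/*
 * Populate @st with folios read from the shmem @mapping, coalescing
 * physically contiguous folios into scatterlist entries of at most
 * @max_segment bytes. On allocation failure, shrink the i915 caches
 * (bound and unbound objects) and retry before falling back to the
 * regular reclaim path.
 */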
int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment)
{
	unsigned int page_count; /* restricted by sg_alloc_table */
	unsigned long i;
	struct scatterlist *sg;
	unsigned long next_pfn = 0;	/* suppress gcc warning */
	gfp_t noreclaim;
	int ret;

	if (overflows_type(size / PAGE_SIZE, page_count))
		return -E2BIG;

	page_count = size / PAGE_SIZE;
	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (size > resource_size(&mr->region))
		return -ENOMEM;

	if (sg_alloc_table(st, page_count, GFP_KERNEL | __GFP_NOWARN))
		return -ENOMEM;

	/*
	 * Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		struct folio *folio;
		unsigned long nr_pages;
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			folio = shmem_read_folio_gfp(mapping, i, gfp);
			if (!IS_ERR(folio))
				break;

			if (!*s) {
				ret = PTR_ERR(folio);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
			}
		} while (1);

		nr_pages = min_t(unsigned long,
				folio_nr_pages(folio), page_count - i);
		if (!i ||
		    sg->length >= max_segment ||
		    folio_pfn(folio) != next_pfn) {
			if (i)
				sg = sg_next(sg);

			st->nents++;
			sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0);
		} else {
			/* XXX: could overflow? */
			sg->length += nr_pages * PAGE_SIZE;
		}
		next_pfn = folio_pfn(folio) + nr_pages;
		i += nr_pages - 1;

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL);
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	return 0;
err_sg:
	sg_mark_end(sg);
	if (sg != st->sgl) {
		shmem_sg_free_table(st, mapping, false, false);
	} else {
		mapping_clear_unevictable(mapping);
		sg_free_table(st);
	}

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

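/*
 * Acquire the shmem backing store for the object and map it for DMA.
 * If DMA remapping fails with large segments, rebuild the scatterlist
 * using PAGE_SIZE chunks before giving up.
 */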
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	struct address_space *mapping = obj->base.filp->f_mapping;
	unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
	struct sg_table *st;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

rebuild_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL | __GFP_NOWARN);
	if (!st)
		return -ENOMEM;

	ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
				   max_segment);
	if (ret)
		goto err_st;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			shmem_sg_free_table(st, mapping, false, false);
			kfree(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %zu pages\n",
				 obj->base.size >> PAGE_SHIFT);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st);

	return 0;

err_pages:
	shmem_sg_free_table(st, mapping, false, false);
	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
err_st:
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	kfree(st);

	return ret;
}

static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmemfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);

	return 0;
}

void __shmem_writeback(size_t size, struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */

	/* Begin writeback on each dirty page */
	for (i = 0; i < size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}

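/*
 * Shrinker callback: objects marked DONTNEED have their backing store
 * truncated immediately, already purged objects are a no-op, and for
 * anything else we may optionally kick off writeback so the pages can
 * be swapped out under memory pressure.
 */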
static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		return i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return 0;
	}

	if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
		shmem_writeback(obj);

	return 0;
}

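/*
 * Move the object back to the CPU domain before its backing store is
 * released: drop the dirty flag for objects marked DONTNEED and
 * clflush the pages when the object is not cache coherent for reads.
 */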
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
	/*
	 * On non-LLC igfx platforms, force the flush-on-acquire if this is ever
	 * swapped-in. Our async flush path is not trustworthy enough yet (and
	 * happens in the wrong order), and with some tricks it's conceivable
	 * for userspace to change the cache-level to I915_CACHE_NONE after the
	 * pages are swapped-in, and since execbuf binds the object before doing
	 * the async flush, we have a race window.
	 */
	if (!HAS_LLC(i915) && !IS_DGFX(i915))
		obj->cache_dirty = true;
}

void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
			    obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
	kfree(pages);
	obj->mm.dirty = false;
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	const struct address_space_operations *aops = mapping->a_ops;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char __maybe_unused c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = aops->write_begin(obj->base.filp, mapping, offset, len,
					&page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = aops->write_end(obj->base.filp, mapping, offset, len,
				      len - unwritten, page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.shrink = shmem_shrink,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	/* XXX: The __shmem_file_setup() function returns -EINVAL if size is
	 * greater than MAX_LFS_FILESIZE.
	 * To handle the same error as other code that returns -E2BIG when
	 * the size is too large, we add a check that returns -E2BIG when the
	 * size is larger than the size that can be handled.
	 * If BITS_PER_LONG is 32, size > MAX_LFS_FILESIZE is always false,
	 * so we only need to check when BITS_PER_LONG is 64.
	 * If BITS_PER_LONG is 32, E2BIG checks are processed when
	 * i915_gem_object_size_2big() is called before the init_object()
	 * callback is called.
	 */
	if (BITS_PER_LONG == 64 && size > MAX_LFS_FILESIZE)
		return -E2BIG;

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}

static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t offset,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	/*
	 * MTL doesn't snoop the CPU cache by default for GPU access (namely
	 * 1-way coherency). However, some UMDs are currently depending on
	 * that. Make 1-way coherent the default setting for MTL. A follow-up
	 * patch will extend the GEM_CREATE uAPI to allow UMDs to specify the
	 * caching mode at BO creation time.
	 */
	if (HAS_LLC(i915) || (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)))
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	const struct address_space_operations *aops;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));
	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	aops = file->f_mapping->a_ops;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = aops->write_begin(file, file->f_mapping, offset, len,
					&page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = aops->write_end(file, file->f_mapping, offset, len, len,
				      page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_init(mem->i915);
	intel_memory_region_set_name(mem, "system");

	return 0; /* We fall back to the kernel mnt if gemfs init failed. */
}

static int release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};

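/*
 * Register shmem-backed system memory as an intel_memory_region, sized
 * to the total amount of RAM in the machine.
 */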
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}