/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/swap.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gemfs.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move pages to the appropriate LRU and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

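/*
 * Allocate the object's backing store from shmemfs and build a scatterlist
 * describing it. Pages are first requested without triggering reclaim; on
 * failure we escalate by shrinking our own bound and unbound objects, and
 * only as a last resort let the VM reclaim, while still reporting -ENOMEM
 * rather than invoking the OOM killer.
 */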
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned long i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	unsigned int max_segment = i915_sg_segment_size();
	unsigned int sg_page_sizes;
	gfp_t noreclaim;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (obj->base.size > resource_size(&mem->region))
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

rebuild_st:
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/*
	 * Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker.
	 */
	mapping = obj->base.filp->f_mapping;
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
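	/*
	 * For each page: first try the allocation without any reclaim,
	 * then shrink our own bound and unbound objects, and finally
	 * retry with the full mapping gfp mask plus __GFP_RETRY_MAYFAIL,
	 * so that we report -ENOMEM to userspace instead of triggering
	 * the OOM killer.
	 */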
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);

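		/*
		 * Coalesce physically contiguous pages into a single sg
		 * entry, starting a new entry whenever the next page is
		 * not adjacent to the last or the entry has grown to
		 * max_segment bytes.
		 */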
		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i) {
				sg_page_sizes |= sg->length;
				sg = sg_next(sg);
			}
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
	}
	if (sg) { /* loop terminated early; short sg table */
		sg_page_sizes |= sg->length;
		sg_mark_end(sg);
	}

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries; asking
		 * for PAGE_SIZE chunks instead may help.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err_sg:
	sg_mark_end(sg);
err_pages:
	mapping_clear_unevictable(mapping);
	if (sg != st->sgl) {
		struct pagevec pvec;

		pagevec_init(&pvec);
		for_each_sgt_page(page, sgt_iter, st) {
			if (!pagevec_add(&pvec, page))
				check_release_pagevec(&pvec);
		}
		if (pagevec_count(&pvec))
			check_release_pagevec(&pvec);
	}
	sg_free_table(st);
	kfree(st);

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient memory, along with
	 * the usual ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

static void
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much memory as possible back to
	 * the system, as we are called from OOM. To do this we must
	 * instruct shmemfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */
	mapping = obj->base.filp->f_mapping;

	/* Begin writeback on each dirty page */
	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

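		/*
		 * Only write back pages that are no longer mapped into any
		 * CPU address space. clear_page_dirty_for_io() readies the
		 * page for writeback, and PageReclaim hints to the VM that
		 * the page should be freed as soon as writeback completes.
		 */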
		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

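/*
 * Common release path for shmem-backed pages: drop the dirty state of
 * objects marked DONTNEED, flush the CPU cache where reads may otherwise
 * be incoherent, and return the object to the CPU write domain.
 */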
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
}

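/*
 * Return pages to shmemfs: unwind the DMA mapping and bit-17 swizzling,
 * transfer the dirty/accessed state to the VM so page contents survive
 * being swapped out, and finally drop our page references.
 */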
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	GEM_WARN_ON(IS_DGFX(to_i915(obj->base.dev)));
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		if (obj->mm.madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue;
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

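		/*
		 * The chunk may straddle a user page boundary; prefault
		 * the tail as well.
		 */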
		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.writeback = shmem_writeback,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

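/*
 * Back the GEM object with a shmemfs file, preferring the driver's private
 * gemfs mount (which may provide transparent-hugepage support) over the
 * kernel-global tmpfs mount.
 */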
static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}

static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(i915))
		/*
		 * On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));
	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	int err;

	err = i915_gemfs_init(mem->i915);
	if (err) {
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n",
			 err);
	}

	intel_memory_region_set_name(mem, "system");

	return 0; /* Don't error, we can simply fall back to the kernel mnt */
}

static void release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};

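/*
 * Create the "system" memory region backed by shmem. The region is nominally
 * sized by total system RAM; actual availability is policed by the page
 * allocator and by the size check in shmem_get_pages().
 */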
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}