/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_tiling.h"
#include "i915_gemfs.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

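/*
 * Release all pages referenced by an sg_table built by shmem_sg_alloc_table:
 * optionally mark each page dirty so its contents are written back rather
 * than discarded, and accessed so recently used backing pages stay resident,
 * then drop the page references and free the table itself.
 */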
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	mapping_clear_unevictable(mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, st) {
		if (dirty)
			set_page_dirty(page);

		if (backup)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);

	sg_free_table(st);
}

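/*
 * Populate an sg_table with the shmem backing pages for an object of the
 * given size, coalescing physically contiguous pages into single entries of
 * up to max_segment bytes. Page allocation escalates gradually: first avoid
 * reclaim entirely, then reap our own GEM objects via the i915 shrinker, and
 * only then let direct reclaim run (without invoking the OOM killer).
 */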
int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment)
{
	const unsigned long page_count = size / PAGE_SIZE;
	unsigned long i;
	struct scatterlist *sg;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t noreclaim;
	int ret;

	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (size > resource_size(&mr->region))
		return -ENOMEM;

	if (sg_alloc_table(st, page_count, GFP_KERNEL | __GFP_NOWARN))
		return -ENOMEM;

	/*
	 * Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
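		/*
		 * Allocation escalation for this page: first try without any
		 * reclaim, then reap our own bound and unbound objects via
		 * the i915 shrinker, and finally hand the problem to the
		 * core VM (see below).
		 */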
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffers; now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bos are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
			}
		} while (1);

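		/*
		 * Coalesce contiguous pages into a single sg entry: start a
		 * new entry for the first page, when the current entry has
		 * reached max_segment, or when the page is not physically
		 * adjacent to the previous one; otherwise just grow the
		 * current entry by a page.
		 */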
		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);

			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	return 0;
err_sg:
	sg_mark_end(sg);
	if (sg != st->sgl) {
		shmem_sg_free_table(st, mapping, false, false);
	} else {
		mapping_clear_unevictable(mapping);
		sg_free_table(st);
	}

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

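/*
 * The ->get_pages() backend for shmem objects: build the backing store from
 * shmemfs and map it for DMA, retrying with PAGE_SIZE segments if the DMA
 * remapper cannot accommodate larger ones.
 */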
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	struct address_space *mapping = obj->base.filp->f_mapping;
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

rebuild_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL | __GFP_NOWARN);
	if (!st)
		return -ENOMEM;

	ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
				   max_segment);
	if (ret)
		goto err_st;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that it could
		 * not reserve enough large entries; asking for PAGE_SIZE
		 * chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);
			kfree(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));

	return 0;

err_pages:
	shmem_sg_free_table(st, mapping, false, false);
	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
err_st:
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	kfree(st);

	return ret;
}

static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as possible back
	 * to the system as we are called from OOM. To do this we must
	 * instruct shmemfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);

	return 0;
}

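/*
 * Kick off asynchronous writeback for every dirty, unmapped page in the
 * first 'size' bytes of the mapping, so the pages can be swapped out and
 * reclaimed once the writes complete.
 */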
void __shmem_writeback(size_t size, struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */

	/* Begin writeback on each dirty page */
	for (i = 0; i < size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

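		/*
		 * Only write back pages with no remaining userspace
		 * mappings. PG_reclaim asks the VM to rotate the page for
		 * immediate reclaim once writeback completes; clear it again
		 * if writeback never actually started.
		 */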
		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}

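/*
 * The ->shrink() backend: purge objects that userspace has marked DONTNEED,
 * and optionally start writeback on everything else so the backing pages can
 * be swapped out under memory pressure.
 */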
static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		return i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return 0;
	}

	if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
		shmem_writeback(obj);

	return 0;
}

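/*
 * Common teardown before shmem backing pages are released: drop the dirty
 * flag for objects userspace no longer needs, flush the CPU caches where the
 * object is not coherent for CPU reads, and move the object back to the CPU
 * write domain.
 */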
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
	/*
	 * On non-LLC platforms, force the flush-on-acquire if this is ever
	 * swapped-in. Our async flush path is not trustworthy enough yet (and
	 * happens in the wrong order), and with some tricks it's conceivable
	 * for userspace to change the cache-level to I915_CACHE_NONE after the
	 * pages are swapped-in, and since execbuf binds the object before doing
	 * the async flush, we have a race window.
	 */
	if (!HAS_LLC(i915))
		obj->cache_dirty = true;
}

void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
			    obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
	kfree(pages);
	obj->mm.dirty = false;
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	const struct address_space_operations *aops = mapping->a_ops;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

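		/*
		 * Go through the mapping's write_begin()/write_end() pair,
		 * exactly as a regular write(2) to the shmem file would, so
		 * the page is instantiated and committed via the pagecache
		 * without being pinned by the object.
		 */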
		err = aops->write_begin(obj->base.filp, mapping, offset, len,
					&page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = aops->write_end(obj->base.filp, mapping, offset, len,
				      len - unwritten, page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

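/*
 * The ->pread() backend: only phys objects need special handling here;
 * returning -ENODEV tells the caller to fall back to the ordinary pread
 * paths.
 */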
static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.shrink = shmem_shrink,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

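/*
 * Create the backing shmem file for a GEM object, preferring the driver's
 * private gemfs mount when it was set up and falling back to the kernel's
 * global tmpfs mount otherwise.
 */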
static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}

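/*
 * The ->init_object() backend for the system memory region: create the shmem
 * file, restrict allocations to the low 4GiB on 965G/GM (which cannot
 * relocate objects above that), and start the object off in the CPU domain
 * with the appropriate cache level.
 */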
static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t offset,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(i915))
		/*
		 * On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	const struct address_space_operations *aops;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));
	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	aops = file->f_mapping->a_ops;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = aops->write_begin(file, file->f_mapping, offset, len,
					&page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = aops->write_end(file, file->f_mapping, offset, len, len,
				      page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_init(mem->i915);
	intel_memory_region_set_name(mem, "system");

	return 0; /* We fall back to the kernel mnt if gemfs init failed. */
}

static int release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};

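/*
 * Register plain system memory as an intel_memory_region, backed by shmem
 * and sized by the total amount of RAM in the machine.
 */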
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}