xref: /openbmc/linux/drivers/gpu/drm/i915/i915_gem.c (revision 59b4412f)
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27 
28 #include <drm/drm_vma_manager.h>
29 #include <linux/dma-fence-array.h>
30 #include <linux/kthread.h>
31 #include <linux/dma-resv.h>
32 #include <linux/shmem_fs.h>
33 #include <linux/slab.h>
34 #include <linux/stop_machine.h>
35 #include <linux/swap.h>
36 #include <linux/pci.h>
37 #include <linux/dma-buf.h>
38 #include <linux/mman.h>
39 
40 #include "display/intel_display.h"
41 #include "display/intel_frontbuffer.h"
42 
43 #include "gem/i915_gem_clflush.h"
44 #include "gem/i915_gem_context.h"
45 #include "gem/i915_gem_ioctls.h"
46 #include "gem/i915_gem_mman.h"
47 #include "gem/i915_gem_region.h"
48 #include "gt/intel_engine_user.h"
49 #include "gt/intel_gt.h"
50 #include "gt/intel_gt_pm.h"
51 #include "gt/intel_workarounds.h"
52 
53 #include "i915_drv.h"
54 #include "i915_trace.h"
55 #include "i915_vgpu.h"
56 
57 #include "intel_pm.h"
58 
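/*
 * Reserve a node of @size bytes in the mappable range of the GGTT drm_mm.
 * The pread/pwrite slow paths use this to map individual pages through the
 * aperture when the whole object cannot be pinned there.
 */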
59 static int
60 insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
61 {
62 	int err;
63 
64 	err = mutex_lock_interruptible(&ggtt->vm.mutex);
65 	if (err)
66 		return err;
67 
68 	memset(node, 0, sizeof(*node));
69 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
70 					  size, 0, I915_COLOR_UNEVICTABLE,
71 					  0, ggtt->mappable_end,
72 					  DRM_MM_INSERT_LOW);
73 
74 	mutex_unlock(&ggtt->vm.mutex);
75 
76 	return err;
77 }
78 
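/* Release a node previously reserved with insert_mappable_node(). */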
79 static void
80 remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
81 {
82 	mutex_lock(&ggtt->vm.mutex);
83 	drm_mm_remove_node(node);
84 	mutex_unlock(&ggtt->vm.mutex);
85 }
86 
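/**
 * i915_gem_get_aperture_ioctl - report the size of the GGTT aperture
 * @dev: drm device pointer
 * @data: ioctl data blob (struct drm_i915_gem_get_aperture)
 * @file: drm file pointer
 *
 * Walks the list of bound vma to report how much of the global GTT is
 * currently pinned, so userspace can estimate how much aperture remains
 * available.
 */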
87 int
88 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
89 			    struct drm_file *file)
90 {
91 	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
92 	struct drm_i915_gem_get_aperture *args = data;
93 	struct i915_vma *vma;
94 	u64 pinned;
95 
96 	if (mutex_lock_interruptible(&ggtt->vm.mutex))
97 		return -EINTR;
98 
99 	pinned = ggtt->vm.reserved;
100 	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
101 		if (i915_vma_is_pinned(vma))
102 			pinned += vma->node.size;
103 
104 	mutex_unlock(&ggtt->vm.mutex);
105 
106 	args->aper_size = ggtt->vm.total;
107 	args->aper_available_size = args->aper_size - pinned;
108 
109 	return 0;
110 }
111 
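/**
 * i915_gem_object_unbind - unbind an object from all of its vma
 * @obj: object to unbind
 * @flags: I915_GEM_OBJECT_UNBIND_* flags controlling how aggressive to be
 *
 * Walks obj->vma.list and unbinds each vma in turn, taking a runtime-pm
 * wakeref up front so that we never have to resume the device while holding
 * vm locks. Returns 0 if every vma was unbound, or a negative error code
 * (e.g. -EBUSY if a vma could not be unbound).
 */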
112 int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
113 			   unsigned long flags)
114 {
115 	struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
116 	LIST_HEAD(still_in_list);
117 	intel_wakeref_t wakeref;
118 	struct i915_vma *vma;
119 	int ret;
120 
121 	if (list_empty(&obj->vma.list))
122 		return 0;
123 
124 	/*
125 	 * As some machines use ACPI to handle runtime-resume callbacks, and
126 	 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
127 	 * as that lock is required by the shrinker. Ergo, we wake the device up
128 	 * first just in case.
129 	 */
130 	wakeref = intel_runtime_pm_get(rpm);
131 
132 try_again:
133 	ret = 0;
134 	spin_lock(&obj->vma.lock);
135 	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
136 						       struct i915_vma,
137 						       obj_link))) {
138 		struct i915_address_space *vm = vma->vm;
139 
140 		list_move_tail(&vma->obj_link, &still_in_list);
141 		if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
142 			continue;
143 
144 		if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
145 			ret = -EBUSY;
146 			break;
147 		}
148 
149 		ret = -EAGAIN;
150 		if (!i915_vm_tryopen(vm))
151 			break;
152 
153 		/* Prevent vma being freed by i915_vma_parked as we unbind */
154 		vma = __i915_vma_get(vma);
155 		spin_unlock(&obj->vma.lock);
156 
157 		if (vma) {
158 			ret = -EBUSY;
159 			if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
160 			    !i915_vma_is_active(vma))
161 				ret = i915_vma_unbind(vma);
162 
163 			__i915_vma_put(vma);
164 		}
165 
166 		i915_vm_close(vm);
167 		spin_lock(&obj->vma.lock);
168 	}
169 	list_splice_init(&still_in_list, &obj->vma.list);
170 	spin_unlock(&obj->vma.lock);
171 
172 	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
173 		rcu_barrier(); /* flush the i915_vm_release() */
174 		goto try_again;
175 	}
176 
177 	intel_runtime_pm_put(rpm, wakeref);
178 
179 	return ret;
180 }
181 
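/*
 * pwrite fallback for objects that have no shmem struct pages backing them
 * ("phys" objects): copy the user data straight into the kernel mapping and
 * flush it out to memory.
 */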
182 static int
183 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
184 		     struct drm_i915_gem_pwrite *args,
185 		     struct drm_file *file)
186 {
187 	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
188 	char __user *user_data = u64_to_user_ptr(args->data_ptr);
189 
190 	/*
191 	 * We manually control the domain here and pretend that it
192 	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
193 	 */
194 	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
195 
196 	if (copy_from_user(vaddr, user_data, args->size))
197 		return -EFAULT;
198 
199 	drm_clflush_virt_range(vaddr, args->size);
200 	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
201 
202 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
203 	return 0;
204 }
205 
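/*
 * Common backend for the create and dumb_create ioctls: round the requested
 * size up to the minimum page size of the target memory region, allocate the
 * object there and return a handle for it (the handle now holds the only
 * reference).
 */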
206 static int
207 i915_gem_create(struct drm_file *file,
208 		struct intel_memory_region *mr,
209 		u64 *size_p,
210 		u32 *handle_p)
211 {
212 	struct drm_i915_gem_object *obj;
213 	u32 handle;
214 	u64 size;
215 	int ret;
216 
217 	GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
218 	size = round_up(*size_p, mr->min_page_size);
219 	if (size == 0)
220 		return -EINVAL;
221 
222 	/* For most of the ABI (e.g. mmap) we think in system pages */
223 	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
224 
225 	/* Allocate the new object */
226 	obj = i915_gem_object_create_region(mr, size, 0);
227 	if (IS_ERR(obj))
228 		return PTR_ERR(obj);
229 
230 	ret = drm_gem_handle_create(file, &obj->base, &handle);
231 	/* drop reference from allocate - handle holds it now */
232 	i915_gem_object_put(obj);
233 	if (ret)
234 		return ret;
235 
236 	*handle_p = handle;
237 	*size_p = size;
238 	return 0;
239 }
240 
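/**
 * i915_gem_dumb_create - create a dumb buffer suitable for scanout
 * @file: drm file pointer
 * @dev: drm device pointer
 * @args: requested width/height/bpp; pitch, size and handle are returned here
 *
 * Works out a pitch and size for the requested dimensions, then allocates the
 * buffer from device-local memory when available, falling back to system
 * memory otherwise.
 */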
241 int
242 i915_gem_dumb_create(struct drm_file *file,
243 		     struct drm_device *dev,
244 		     struct drm_mode_create_dumb *args)
245 {
246 	enum intel_memory_type mem_type;
247 	int cpp = DIV_ROUND_UP(args->bpp, 8);
248 	u32 format;
249 
250 	switch (cpp) {
251 	case 1:
252 		format = DRM_FORMAT_C8;
253 		break;
254 	case 2:
255 		format = DRM_FORMAT_RGB565;
256 		break;
257 	case 4:
258 		format = DRM_FORMAT_XRGB8888;
259 		break;
260 	default:
261 		return -EINVAL;
262 	}
263 
264 	/* have to work out size/pitch and return them */
265 	args->pitch = ALIGN(args->width * cpp, 64);
266 
267 	/* align stride to page size so that we can remap */
268 	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
269 						    DRM_FORMAT_MOD_LINEAR))
270 		args->pitch = ALIGN(args->pitch, 4096);
271 
272 	if (args->pitch < args->width)
273 		return -EINVAL;
274 
275 	args->size = mul_u32_u32(args->pitch, args->height);
276 
277 	mem_type = INTEL_MEMORY_SYSTEM;
278 	if (HAS_LMEM(to_i915(dev)))
279 		mem_type = INTEL_MEMORY_LOCAL;
280 
281 	return i915_gem_create(file,
282 			       intel_memory_region_by_type(to_i915(dev),
283 							   mem_type),
284 			       &args->size, &args->handle);
285 }
286 
287 /**
288  * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
289  * @dev: drm device pointer
290  * @data: ioctl data blob
291  * @file: drm file pointer
292  */
293 int
294 i915_gem_create_ioctl(struct drm_device *dev, void *data,
295 		      struct drm_file *file)
296 {
297 	struct drm_i915_private *i915 = to_i915(dev);
298 	struct drm_i915_gem_create *args = data;
299 
300 	i915_gem_flush_free_objects(i915);
301 
302 	return i915_gem_create(file,
303 			       intel_memory_region_by_type(i915,
304 							   INTEL_MEMORY_SYSTEM),
305 			       &args->size, &args->handle);
306 }
307 
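/*
 * Per-page copy function for the shmem pread fastpath: kmap the page,
 * optionally flush stale cachelines so the copy sees what is in memory, then
 * copy into the user buffer.
 */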
308 static int
309 shmem_pread(struct page *page, int offset, int len, char __user *user_data,
310 	    bool needs_clflush)
311 {
312 	char *vaddr;
313 	int ret;
314 
315 	vaddr = kmap(page);
316 
317 	if (needs_clflush)
318 		drm_clflush_virt_range(vaddr + offset, len);
319 
320 	ret = __copy_to_user(user_data, vaddr + offset, len);
321 
322 	kunmap(page);
323 
324 	return ret ? -EFAULT : 0;
325 }
326 
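/*
 * pread through the object's shmem backing pages, one page at a time. This is
 * the preferred path; the GGTT mapping path below is only used as a fallback.
 */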
327 static int
328 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
329 		     struct drm_i915_gem_pread *args)
330 {
331 	unsigned int needs_clflush;
332 	unsigned int idx, offset;
333 	struct dma_fence *fence;
334 	char __user *user_data;
335 	u64 remain;
336 	int ret;
337 
338 	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
339 	if (ret)
340 		return ret;
341 
342 	fence = i915_gem_object_lock_fence(obj);
343 	i915_gem_object_finish_access(obj);
344 	if (!fence)
345 		return -ENOMEM;
346 
347 	remain = args->size;
348 	user_data = u64_to_user_ptr(args->data_ptr);
349 	offset = offset_in_page(args->offset);
350 	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
351 		struct page *page = i915_gem_object_get_page(obj, idx);
352 		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
353 
354 		ret = shmem_pread(page, offset, length, user_data,
355 				  needs_clflush);
356 		if (ret)
357 			break;
358 
359 		remain -= length;
360 		user_data += length;
361 		offset = 0;
362 	}
363 
364 	i915_gem_object_unlock_fence(obj, fence);
365 	return ret;
366 }
367 
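/*
 * Copy from an io mapping of the aperture to userspace: first via an atomic
 * mapping with a non-sleeping copy, and if that does not complete, again via
 * a regular mapping with a copy that may fault. Returns true if any bytes
 * could not be copied.
 */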
368 static inline bool
369 gtt_user_read(struct io_mapping *mapping,
370 	      loff_t base, int offset,
371 	      char __user *user_data, int length)
372 {
373 	void __iomem *vaddr;
374 	unsigned long unwritten;
375 
376 	/* We can use the cpu mem copy function because this is X86. */
377 	vaddr = io_mapping_map_atomic_wc(mapping, base);
378 	unwritten = __copy_to_user_inatomic(user_data,
379 					    (void __force *)vaddr + offset,
380 					    length);
381 	io_mapping_unmap_atomic(vaddr);
382 	if (unwritten) {
383 		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
384 		unwritten = copy_to_user(user_data,
385 					 (void __force *)vaddr + offset,
386 					 length);
387 		io_mapping_unmap(vaddr);
388 	}
389 	return unwritten;
390 }
391 
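/*
 * Slow pread path through the mappable GGTT aperture: pin the object into the
 * aperture (or, failing that, map it one page at a time through a temporary
 * GGTT PTE) and read it via the uncached io mapping.
 */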
392 static int
393 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
394 		   const struct drm_i915_gem_pread *args)
395 {
396 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
397 	struct i915_ggtt *ggtt = &i915->ggtt;
398 	intel_wakeref_t wakeref;
399 	struct drm_mm_node node;
400 	struct dma_fence *fence;
401 	void __user *user_data;
402 	struct i915_vma *vma;
403 	u64 remain, offset;
404 	int ret;
405 
406 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
407 	vma = ERR_PTR(-ENODEV);
408 	if (!i915_gem_object_is_tiled(obj))
409 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
410 					       PIN_MAPPABLE |
411 					       PIN_NONBLOCK /* NOWARN */ |
412 					       PIN_NOEVICT);
413 	if (!IS_ERR(vma)) {
414 		node.start = i915_ggtt_offset(vma);
415 		node.flags = 0;
416 	} else {
417 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
418 		if (ret)
419 			goto out_rpm;
420 		GEM_BUG_ON(!drm_mm_node_allocated(&node));
421 	}
422 
423 	ret = i915_gem_object_lock_interruptible(obj);
424 	if (ret)
425 		goto out_unpin;
426 
427 	ret = i915_gem_object_set_to_gtt_domain(obj, false);
428 	if (ret) {
429 		i915_gem_object_unlock(obj);
430 		goto out_unpin;
431 	}
432 
433 	fence = i915_gem_object_lock_fence(obj);
434 	i915_gem_object_unlock(obj);
435 	if (!fence) {
436 		ret = -ENOMEM;
437 		goto out_unpin;
438 	}
439 
440 	user_data = u64_to_user_ptr(args->data_ptr);
441 	remain = args->size;
442 	offset = args->offset;
443 
444 	while (remain > 0) {
445 		/* Operation in this page
446 		 *
447 		 * page_base = page offset within aperture
448 		 * page_offset = offset within page
449 		 * page_length = bytes to copy for this page
450 		 */
451 		u32 page_base = node.start;
452 		unsigned page_offset = offset_in_page(offset);
453 		unsigned page_length = PAGE_SIZE - page_offset;
454 		page_length = remain < page_length ? remain : page_length;
455 		if (drm_mm_node_allocated(&node)) {
456 			ggtt->vm.insert_page(&ggtt->vm,
457 					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
458 					     node.start, I915_CACHE_NONE, 0);
459 		} else {
460 			page_base += offset & PAGE_MASK;
461 		}
462 
463 		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
464 				  user_data, page_length)) {
465 			ret = -EFAULT;
466 			break;
467 		}
468 
469 		remain -= page_length;
470 		user_data += page_length;
471 		offset += page_length;
472 	}
473 
474 	i915_gem_object_unlock_fence(obj, fence);
475 out_unpin:
476 	if (drm_mm_node_allocated(&node)) {
477 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
478 		remove_mappable_node(ggtt, &node);
479 	} else {
480 		i915_vma_unpin(vma);
481 	}
482 out_rpm:
483 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
484 	return ret;
485 }
486 
487 /**
488  * i915_gem_pread_ioctl - Reads data from the object referenced by handle.
489  * @dev: drm device pointer
490  * @data: ioctl data blob
491  * @file: drm file pointer
492  *
493  * On error, the contents of *data are undefined.
494  */
495 int
496 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
497 		     struct drm_file *file)
498 {
499 	struct drm_i915_gem_pread *args = data;
500 	struct drm_i915_gem_object *obj;
501 	int ret;
502 
503 	if (args->size == 0)
504 		return 0;
505 
506 	if (!access_ok(u64_to_user_ptr(args->data_ptr),
507 		       args->size))
508 		return -EFAULT;
509 
510 	obj = i915_gem_object_lookup(file, args->handle);
511 	if (!obj)
512 		return -ENOENT;
513 
514 	/* Bounds check source.  */
515 	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
516 		ret = -EINVAL;
517 		goto out;
518 	}
519 
520 	trace_i915_gem_object_pread(obj, args->offset, args->size);
521 
522 	ret = i915_gem_object_wait(obj,
523 				   I915_WAIT_INTERRUPTIBLE,
524 				   MAX_SCHEDULE_TIMEOUT);
525 	if (ret)
526 		goto out;
527 
528 	ret = i915_gem_object_pin_pages(obj);
529 	if (ret)
530 		goto out;
531 
532 	ret = i915_gem_shmem_pread(obj, args);
533 	if (ret == -EFAULT || ret == -ENODEV)
534 		ret = i915_gem_gtt_pread(obj, args);
535 
536 	i915_gem_object_unpin_pages(obj);
537 out:
538 	i915_gem_object_put(obj);
539 	return ret;
540 }
541 
542 /* This is the fast write path which cannot handle
543  * page faults in the source data
544  */
545 
546 static inline bool
547 ggtt_write(struct io_mapping *mapping,
548 	   loff_t base, int offset,
549 	   char __user *user_data, int length)
550 {
551 	void __iomem *vaddr;
552 	unsigned long unwritten;
553 
554 	/* We can use the cpu mem copy function because this is X86. */
555 	vaddr = io_mapping_map_atomic_wc(mapping, base);
556 	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
557 						      user_data, length);
558 	io_mapping_unmap_atomic(vaddr);
559 	if (unwritten) {
560 		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
561 		unwritten = copy_from_user((void __force *)vaddr + offset,
562 					   user_data, length);
563 		io_mapping_unmap(vaddr);
564 	}
565 
566 	return unwritten;
567 }
568 
569 /**
570  * i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy
571  * the data directly from the user into the GTT, uncached.
572  * @obj: i915 GEM object
573  * @args: pwrite arguments structure
574  */
575 static int
576 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
577 			 const struct drm_i915_gem_pwrite *args)
578 {
579 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
580 	struct i915_ggtt *ggtt = &i915->ggtt;
581 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
582 	intel_wakeref_t wakeref;
583 	struct drm_mm_node node;
584 	struct dma_fence *fence;
585 	struct i915_vma *vma;
586 	u64 remain, offset;
587 	void __user *user_data;
588 	int ret;
589 
590 	if (i915_gem_object_has_struct_page(obj)) {
591 		/*
592 		 * Avoid waking the device up if we can fall back, as
593 		 * waking/resuming is very slow (worst-case 10-100 ms
594 		 * depending on PCI sleeps and our own resume time).
595 		 * This easily dwarfs any performance advantage from
596 		 * using the cache bypass of indirect GGTT access.
597 		 */
598 		wakeref = intel_runtime_pm_get_if_in_use(rpm);
599 		if (!wakeref)
600 			return -EFAULT;
601 	} else {
602 		/* No backing pages, no fallback, we must force GGTT access */
603 		wakeref = intel_runtime_pm_get(rpm);
604 	}
605 
606 	vma = ERR_PTR(-ENODEV);
607 	if (!i915_gem_object_is_tiled(obj))
608 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
609 					       PIN_MAPPABLE |
610 					       PIN_NONBLOCK /* NOWARN */ |
611 					       PIN_NOEVICT);
612 	if (!IS_ERR(vma)) {
613 		node.start = i915_ggtt_offset(vma);
614 		node.flags = 0;
615 	} else {
616 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
617 		if (ret)
618 			goto out_rpm;
619 		GEM_BUG_ON(!drm_mm_node_allocated(&node));
620 	}
621 
622 	ret = i915_gem_object_lock_interruptible(obj);
623 	if (ret)
624 		goto out_unpin;
625 
626 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
627 	if (ret) {
628 		i915_gem_object_unlock(obj);
629 		goto out_unpin;
630 	}
631 
632 	fence = i915_gem_object_lock_fence(obj);
633 	i915_gem_object_unlock(obj);
634 	if (!fence) {
635 		ret = -ENOMEM;
636 		goto out_unpin;
637 	}
638 
639 	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
640 
641 	user_data = u64_to_user_ptr(args->data_ptr);
642 	offset = args->offset;
643 	remain = args->size;
644 	while (remain) {
645 		/* Operation in this page
646 		 *
647 		 * page_base = page offset within aperture
648 		 * page_offset = offset within page
649 		 * page_length = bytes to copy for this page
650 		 */
651 		u32 page_base = node.start;
652 		unsigned int page_offset = offset_in_page(offset);
653 		unsigned int page_length = PAGE_SIZE - page_offset;
654 		page_length = remain < page_length ? remain : page_length;
655 		if (drm_mm_node_allocated(&node)) {
656 			/* flush the write before we modify the GGTT */
657 			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
658 			ggtt->vm.insert_page(&ggtt->vm,
659 					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
660 					     node.start, I915_CACHE_NONE, 0);
661 			wmb(); /* flush modifications to the GGTT (insert_page) */
662 		} else {
663 			page_base += offset & PAGE_MASK;
664 		}
665 		/* If we get a fault while copying data, then (presumably) our
666 		 * source page isn't available.  Return the error and we'll
667 		 * retry in the slow path.
668 		 * If the object is non-shmem backed, we retry again with the
669 		 * path that handles page faults.
670 		 */
671 		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
672 			       user_data, page_length)) {
673 			ret = -EFAULT;
674 			break;
675 		}
676 
677 		remain -= page_length;
678 		user_data += page_length;
679 		offset += page_length;
680 	}
681 
682 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
683 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
684 
685 	i915_gem_object_unlock_fence(obj, fence);
686 out_unpin:
687 	if (drm_mm_node_allocated(&node)) {
688 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
689 		remove_mappable_node(ggtt, &node);
690 	} else {
691 		i915_vma_unpin(vma);
692 	}
693 out_rpm:
694 	intel_runtime_pm_put(rpm, wakeref);
695 	return ret;
696 }
697 
698 /* Per-page copy function for the shmem pwrite fastpath.
699  * Flushes invalid cachelines before writing to the target if
700  * needs_clflush_before is set and flushes out any written cachelines after
701  * writing if needs_clflush is set.
702  */
703 static int
704 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
705 	     bool needs_clflush_before,
706 	     bool needs_clflush_after)
707 {
708 	char *vaddr;
709 	int ret;
710 
711 	vaddr = kmap(page);
712 
713 	if (needs_clflush_before)
714 		drm_clflush_virt_range(vaddr + offset, len);
715 
716 	ret = __copy_from_user(vaddr + offset, user_data, len);
717 	if (!ret && needs_clflush_after)
718 		drm_clflush_virt_range(vaddr + offset, len);
719 
720 	kunmap(page);
721 
722 	return ret ? -EFAULT : 0;
723 }
724 
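/*
 * pwrite through the object's shmem backing pages, flushing cachelines around
 * the copy as required by the object's cache coherency.
 */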
725 static int
726 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
727 		      const struct drm_i915_gem_pwrite *args)
728 {
729 	unsigned int partial_cacheline_write;
730 	unsigned int needs_clflush;
731 	unsigned int offset, idx;
732 	struct dma_fence *fence;
733 	void __user *user_data;
734 	u64 remain;
735 	int ret;
736 
737 	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
738 	if (ret)
739 		return ret;
740 
741 	fence = i915_gem_object_lock_fence(obj);
742 	i915_gem_object_finish_access(obj);
743 	if (!fence)
744 		return -ENOMEM;
745 
746 	/* If we don't overwrite a cacheline completely we need to be
747 	 * careful to have up-to-date data by first clflushing. Don't
748 	 * overcomplicate things; just flush the entire patch being written.
749 	 */
750 	partial_cacheline_write = 0;
751 	if (needs_clflush & CLFLUSH_BEFORE)
752 		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
753 
754 	user_data = u64_to_user_ptr(args->data_ptr);
755 	remain = args->size;
756 	offset = offset_in_page(args->offset);
757 	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
758 		struct page *page = i915_gem_object_get_page(obj, idx);
759 		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
760 
761 		ret = shmem_pwrite(page, offset, length, user_data,
762 				   (offset | length) & partial_cacheline_write,
763 				   needs_clflush & CLFLUSH_AFTER);
764 		if (ret)
765 			break;
766 
767 		remain -= length;
768 		user_data += length;
769 		offset = 0;
770 	}
771 
772 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
773 	i915_gem_object_unlock_fence(obj, fence);
774 
775 	return ret;
776 }
777 
778 /**
779  * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
780  * @dev: drm device
781  * @data: ioctl data blob
782  * @file: drm file
783  *
784  * On error, the contents of the buffer that were to be modified are undefined.
785  */
786 int
787 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
788 		      struct drm_file *file)
789 {
790 	struct drm_i915_gem_pwrite *args = data;
791 	struct drm_i915_gem_object *obj;
792 	int ret;
793 
794 	if (args->size == 0)
795 		return 0;
796 
797 	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
798 		return -EFAULT;
799 
800 	obj = i915_gem_object_lookup(file, args->handle);
801 	if (!obj)
802 		return -ENOENT;
803 
804 	/* Bounds check destination. */
805 	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
806 		ret = -EINVAL;
807 		goto err;
808 	}
809 
810 	/* Writes not allowed into this read-only object */
811 	if (i915_gem_object_is_readonly(obj)) {
812 		ret = -EINVAL;
813 		goto err;
814 	}
815 
816 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
817 
818 	ret = -ENODEV;
819 	if (obj->ops->pwrite)
820 		ret = obj->ops->pwrite(obj, args);
821 	if (ret != -ENODEV)
822 		goto err;
823 
824 	ret = i915_gem_object_wait(obj,
825 				   I915_WAIT_INTERRUPTIBLE |
826 				   I915_WAIT_ALL,
827 				   MAX_SCHEDULE_TIMEOUT);
828 	if (ret)
829 		goto err;
830 
831 	ret = i915_gem_object_pin_pages(obj);
832 	if (ret)
833 		goto err;
834 
835 	ret = -EFAULT;
836 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
837 	 * it would end up going through the fenced access, and we'll get
838 	 * different detiling behavior between reading and writing.
839 	 * pread/pwrite currently are reading and writing from the CPU
840 	 * perspective, requiring manual detiling by the client.
841 	 */
842 	if (!i915_gem_object_has_struct_page(obj) ||
843 	    cpu_write_needs_clflush(obj))
844 		/* Note that the gtt paths might fail with non-page-backed user
845 		 * pointers (e.g. gtt mappings when moving data between
846 		 * textures). Fall back to the shmem path in that case.
847 		 */
848 		ret = i915_gem_gtt_pwrite_fast(obj, args);
849 
850 	if (ret == -EFAULT || ret == -ENOSPC) {
851 		if (i915_gem_object_has_struct_page(obj))
852 			ret = i915_gem_shmem_pwrite(obj, args);
853 		else
854 			ret = i915_gem_phys_pwrite(obj, args, file);
855 	}
856 
857 	i915_gem_object_unpin_pages(obj);
858 err:
859 	i915_gem_object_put(obj);
860 	return ret;
861 }
862 
863 /**
864  * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
865  * @dev: drm device
866  * @data: ioctl data blob
867  * @file: drm file
868  */
869 int
870 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
871 			 struct drm_file *file)
872 {
873 	struct drm_i915_gem_sw_finish *args = data;
874 	struct drm_i915_gem_object *obj;
875 
876 	obj = i915_gem_object_lookup(file, args->handle);
877 	if (!obj)
878 		return -ENOENT;
879 
880 	/*
881 	 * Proxy objects are barred from CPU access, so there is no
882 	 * need to ban sw_finish as it is a nop.
883 	 */
884 
885 	/* Pinned buffers may be scanout, so flush the cache */
886 	i915_gem_object_flush_if_display(obj);
887 	i915_gem_object_put(obj);
888 
889 	return 0;
890 }
891 
892 void i915_gem_runtime_suspend(struct drm_i915_private *i915)
893 {
894 	struct drm_i915_gem_object *obj, *on;
895 	int i;
896 
897 	/*
898 	 * Only called during RPM suspend. All users of the userfault_list
899 	 * must be holding an RPM wakeref to ensure that this cannot
900 	 * run concurrently with themselves (and use the struct_mutex for
901 	 * protection between themselves).
902 	 */
903 
904 	list_for_each_entry_safe(obj, on,
905 				 &i915->ggtt.userfault_list, userfault_link)
906 		__i915_gem_object_release_mmap_gtt(obj);
907 
908 	/*
909 	 * The fences will be lost when the device powers down. If any were
910 	 * in use by hardware (i.e. they are pinned), we should not be powering
911 	 * down! All other fences will be reacquired by the user upon waking.
912 	 */
913 	for (i = 0; i < i915->ggtt.num_fences; i++) {
914 		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
915 
916 		/*
917 		 * Ideally we want to assert that the fence register is not
918 		 * live at this point (i.e. that no piece of code will be
919 		 * trying to write through fence + GTT, as that not only violates
920 		 * our tracking of activity and associated locking/barriers,
921 		 * but is also illegal given that the hw is powered down).
922 		 *
923 		 * Previously we used reg->pin_count as a "liveness" indicator.
924 		 * That is not sufficient, and we need a more fine-grained
925 		 * tool if we want to have a sanity check here.
926 		 */
927 
928 		if (!reg->vma)
929 			continue;
930 
931 		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
932 		reg->dirty = true;
933 	}
934 }
935 
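/**
 * i915_gem_object_ggtt_pin - pin an object into the global GTT
 * @obj: object to pin
 * @view: optional partial/alternate view of the object
 * @size: minimum size of the resulting node
 * @alignment: required alignment of the node
 * @flags: PIN_* control flags
 *
 * Looks up (or creates) the vma for @obj in the global GTT, unbinds it first
 * if it is misplaced, and pins it with PIN_GLOBAL. Returns the pinned vma on
 * success or an ERR_PTR on failure.
 */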
936 struct i915_vma *
937 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
938 			 const struct i915_ggtt_view *view,
939 			 u64 size,
940 			 u64 alignment,
941 			 u64 flags)
942 {
943 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
944 	struct i915_ggtt *ggtt = &i915->ggtt;
945 	struct i915_vma *vma;
946 	int ret;
947 
948 	if (flags & PIN_MAPPABLE &&
949 	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
950 		/*
951 		 * If the required space is larger than the available
952 		 * aperture, we will not be able to find a slot for the
953 		 * object and unbinding the object now will be in
954 		 * vain. Worse, doing so may cause us to ping-pong
955 		 * the object in and out of the Global GTT and
956 		 * waste a lot of cycles under the mutex.
957 		 */
958 		if (obj->base.size > ggtt->mappable_end)
959 			return ERR_PTR(-E2BIG);
960 
961 		/*
962 		 * If NONBLOCK is set the caller is optimistically
963 		 * trying to cache the full object within the mappable
964 		 * aperture, and *must* have a fallback in place for
965 		 * situations where we cannot bind the object. We
966 		 * can be a little more lax here and use the fallback
967 		 * more often to avoid costly migrations of ourselves
968 		 * and other objects within the aperture.
969 		 *
970 		 * Half-the-aperture is used as a simple heuristic.
971 		 * More interesting would be to search for a free
972 		 * block prior to making the commitment to unbind.
973 		 * That caters for the self-harm case, and with a
974 		 * little more heuristics (e.g. NOFAULT, NOEVICT)
975 		 * we could try to minimise harm to others.
976 		 */
977 		if (flags & PIN_NONBLOCK &&
978 		    obj->base.size > ggtt->mappable_end / 2)
979 			return ERR_PTR(-ENOSPC);
980 	}
981 
982 	vma = i915_vma_instance(obj, &ggtt->vm, view);
983 	if (IS_ERR(vma))
984 		return vma;
985 
986 	if (i915_vma_misplaced(vma, size, alignment, flags)) {
987 		if (flags & PIN_NONBLOCK) {
988 			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
989 				return ERR_PTR(-ENOSPC);
990 
991 			if (flags & PIN_MAPPABLE &&
992 			    vma->fence_size > ggtt->mappable_end / 2)
993 				return ERR_PTR(-ENOSPC);
994 		}
995 
996 		ret = i915_vma_unbind(vma);
997 		if (ret)
998 			return ERR_PTR(ret);
999 	}
1000 
1001 	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
1002 	if (ret)
1003 		return ERR_PTR(ret);
1004 
1005 	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
1006 		mutex_lock(&ggtt->vm.mutex);
1007 		i915_vma_revoke_fence(vma);
1008 		mutex_unlock(&ggtt->vm.mutex);
1009 	}
1010 
1011 	ret = i915_vma_wait_for_bind(vma);
1012 	if (ret) {
1013 		i915_vma_unpin(vma);
1014 		return ERR_PTR(ret);
1015 	}
1016 
1017 	return vma;
1018 }
1019 
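/**
 * i915_gem_madvise_ioctl - advise the kernel about an object's backing storage
 * @dev: drm device pointer
 * @data: ioctl data blob (struct drm_i915_gem_madvise)
 * @file_priv: drm file pointer
 *
 * Marks the object's pages as needed (I915_MADV_WILLNEED) or as discardable
 * under memory pressure (I915_MADV_DONTNEED), and reports back whether the
 * backing storage is still present.
 */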
1020 int
1021 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1022 		       struct drm_file *file_priv)
1023 {
1024 	struct drm_i915_private *i915 = to_i915(dev);
1025 	struct drm_i915_gem_madvise *args = data;
1026 	struct drm_i915_gem_object *obj;
1027 	int err;
1028 
1029 	switch (args->madv) {
1030 	case I915_MADV_DONTNEED:
1031 	case I915_MADV_WILLNEED:
1032 	    break;
1033 	default:
1034 	    return -EINVAL;
1035 	}
1036 
1037 	obj = i915_gem_object_lookup(file_priv, args->handle);
1038 	if (!obj)
1039 		return -ENOENT;
1040 
1041 	err = mutex_lock_interruptible(&obj->mm.lock);
1042 	if (err)
1043 		goto out;
1044 
1045 	if (i915_gem_object_has_pages(obj) &&
1046 	    i915_gem_object_is_tiled(obj) &&
1047 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
1048 		if (obj->mm.madv == I915_MADV_WILLNEED) {
1049 			GEM_BUG_ON(!obj->mm.quirked);
1050 			__i915_gem_object_unpin_pages(obj);
1051 			obj->mm.quirked = false;
1052 		}
1053 		if (args->madv == I915_MADV_WILLNEED) {
1054 			GEM_BUG_ON(obj->mm.quirked);
1055 			__i915_gem_object_pin_pages(obj);
1056 			obj->mm.quirked = true;
1057 		}
1058 	}
1059 
1060 	if (obj->mm.madv != __I915_MADV_PURGED)
1061 		obj->mm.madv = args->madv;
1062 
1063 	if (i915_gem_object_has_pages(obj)) {
1064 		struct list_head *list;
1065 
1066 		if (i915_gem_object_is_shrinkable(obj)) {
1067 			unsigned long flags;
1068 
1069 			spin_lock_irqsave(&i915->mm.obj_lock, flags);
1070 
1071 			if (obj->mm.madv != I915_MADV_WILLNEED)
1072 				list = &i915->mm.purge_list;
1073 			else
1074 				list = &i915->mm.shrink_list;
1075 			list_move_tail(&obj->mm.link, list);
1076 
1077 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
1078 		}
1079 	}
1080 
1081 	/* if the object is no longer attached, discard its backing storage */
1082 	if (obj->mm.madv == I915_MADV_DONTNEED &&
1083 	    !i915_gem_object_has_pages(obj))
1084 		i915_gem_object_truncate(obj);
1085 
1086 	args->retained = obj->mm.madv != __I915_MADV_PURGED;
1087 	mutex_unlock(&obj->mm.lock);
1088 
1089 out:
1090 	i915_gem_object_put(obj);
1091 	return err;
1092 }
1093 
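/*
 * One-time GEM initialisation at driver load: set up userptr support, fetch
 * the uC firmwares, initialise the GGTT and then the GT. On -EIO the GPU is
 * marked wedged and the driver carries on with KMS only.
 */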
1094 int i915_gem_init(struct drm_i915_private *dev_priv)
1095 {
1096 	int ret;
1097 
1098 	/* We need to fall back to 4K pages if the host doesn't support huge gtt. */
1099 	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
1100 		mkwrite_device_info(dev_priv)->page_sizes =
1101 			I915_GTT_PAGE_SIZE_4K;
1102 
1103 	ret = i915_gem_init_userptr(dev_priv);
1104 	if (ret)
1105 		return ret;
1106 
1107 	intel_uc_fetch_firmwares(&dev_priv->gt.uc);
1108 	intel_wopcm_init(&dev_priv->wopcm);
1109 
1110 	ret = i915_init_ggtt(dev_priv);
1111 	if (ret) {
1112 		GEM_BUG_ON(ret == -EIO);
1113 		goto err_unlock;
1114 	}
1115 
1116 	/*
1117 	 * Despite its name, intel_init_clock_gating applies both display
1118 	 * clock gating workarounds and GT mmio workarounds, plus the occasional
1119 	 * GT power context workaround. Worse, sometimes it includes a context
1120 	 * register workaround which we need to apply before we record the
1121 	 * default HW state for all contexts.
1122 	 *
1123 	 * FIXME: break up the workarounds and apply them at the right time!
1124 	 */
1125 	intel_init_clock_gating(dev_priv);
1126 
1127 	ret = intel_gt_init(&dev_priv->gt);
1128 	if (ret)
1129 		goto err_unlock;
1130 
1131 	return 0;
1132 
1133 	/*
1134 	 * Unwinding is complicated by the fact that we want to handle -EIO to
1135 	 * mean disabling GPU submission but keeping KMS alive. We want to mark
1136 	 * the HW as irreversibly wedged, but keep enough state around that the
1137 	 * driver doesn't explode during runtime.
1138 	 */
1139 err_unlock:
1140 	i915_gem_drain_workqueue(dev_priv);
1141 
1142 	if (ret != -EIO) {
1143 		intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
1144 		i915_gem_cleanup_userptr(dev_priv);
1145 	}
1146 
1147 	if (ret == -EIO) {
1148 		/*
1149 		 * Allow engines or uC initialisation to fail by marking the GPU
1150 		 * as wedged. But we only want to do this when the GPU is angry;
1151 		 * for all other failures, such as an allocation failure, we bail.
1152 		 */
1153 		if (!intel_gt_is_wedged(&dev_priv->gt)) {
1154 			i915_probe_error(dev_priv,
1155 					 "Failed to initialize GPU, declaring it wedged!\n");
1156 			intel_gt_set_wedged(&dev_priv->gt);
1157 		}
1158 
1159 		/* Minimal basic recovery for KMS */
1160 		ret = i915_ggtt_enable_hw(dev_priv);
1161 		i915_ggtt_resume(&dev_priv->ggtt);
1162 		intel_init_clock_gating(dev_priv);
1163 	}
1164 
1165 	i915_gem_drain_freed_objects(dev_priv);
1166 	return ret;
1167 }
1168 
1169 void i915_gem_driver_register(struct drm_i915_private *i915)
1170 {
1171 	i915_gem_driver_register__shrinker(i915);
1172 
1173 	intel_engines_driver_register(i915);
1174 }
1175 
1176 void i915_gem_driver_unregister(struct drm_i915_private *i915)
1177 {
1178 	i915_gem_driver_unregister__shrinker(i915);
1179 }
1180 
1181 void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
1182 {
1183 	intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
1184 
1185 	i915_gem_suspend_late(dev_priv);
1186 	intel_gt_driver_remove(&dev_priv->gt);
1187 	dev_priv->uabi_engines = RB_ROOT;
1188 
1189 	/* Flush any outstanding unpin_work. */
1190 	i915_gem_drain_workqueue(dev_priv);
1191 
1192 	i915_gem_drain_freed_objects(dev_priv);
1193 }
1194 
1195 void i915_gem_driver_release(struct drm_i915_private *dev_priv)
1196 {
1197 	i915_gem_driver_release__contexts(dev_priv);
1198 
1199 	intel_gt_driver_release(&dev_priv->gt);
1200 
1201 	intel_wa_list_free(&dev_priv->gt_wa_list);
1202 
1203 	intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
1204 	i915_gem_cleanup_userptr(dev_priv);
1205 
1206 	i915_gem_drain_freed_objects(dev_priv);
1207 
1208 	drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
1209 }
1210 
1211 static void i915_gem_init__mm(struct drm_i915_private *i915)
1212 {
1213 	spin_lock_init(&i915->mm.obj_lock);
1214 
1215 	init_llist_head(&i915->mm.free_list);
1216 
1217 	INIT_LIST_HEAD(&i915->mm.purge_list);
1218 	INIT_LIST_HEAD(&i915->mm.shrink_list);
1219 
1220 	i915_gem_init__objects(i915);
1221 }
1222 
1223 void i915_gem_init_early(struct drm_i915_private *dev_priv)
1224 {
1225 	i915_gem_init__mm(dev_priv);
1226 	i915_gem_init__contexts(dev_priv);
1227 
1228 	spin_lock_init(&dev_priv->fb_tracking.lock);
1229 }
1230 
1231 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
1232 {
1233 	i915_gem_drain_freed_objects(dev_priv);
1234 	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
1235 	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
1236 	drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
1237 }
1238 
1239 int i915_gem_freeze(struct drm_i915_private *dev_priv)
1240 {
1241 	/* Discard all purgeable objects, let userspace recover those as
1242 	 * required after resuming.
1243 	 */
1244 	i915_gem_shrink_all(dev_priv);
1245 
1246 	return 0;
1247 }
1248 
1249 int i915_gem_freeze_late(struct drm_i915_private *i915)
1250 {
1251 	struct drm_i915_gem_object *obj;
1252 	intel_wakeref_t wakeref;
1253 
1254 	/*
1255 	 * Called just before we write the hibernation image.
1256 	 *
1257 	 * We need to update the domain tracking to reflect that the CPU
1258 	 * will be accessing all the pages to create and restore from the
1259 	 * hibernation, and so upon restoration those pages will be in the
1260 	 * CPU domain.
1261 	 *
1262 	 * To make sure the hibernation image contains the latest state,
1263 	 * we update that state just before writing out the image.
1264 	 *
1265 	 * To try and reduce the hibernation image, we manually shrink
1266 	 * the objects as well, see i915_gem_freeze()
1267 	 */
1268 
1269 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1270 
1271 	i915_gem_shrink(i915, -1UL, NULL, ~0);
1272 	i915_gem_drain_freed_objects(i915);
1273 
1274 	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
1275 		i915_gem_object_lock(obj);
1276 		drm_WARN_ON(&i915->drm,
1277 			    i915_gem_object_set_to_cpu_domain(obj, true));
1278 		i915_gem_object_unlock(obj);
1279 	}
1280 
1281 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1282 
1283 	return 0;
1284 }
1285 
1286 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
1287 {
1288 	struct drm_i915_file_private *file_priv = file->driver_priv;
1289 	struct i915_request *request;
1290 
1291 	/* Clean up our request list when the client is going away, so that
1292 	 * later retire_requests won't dereference our soon-to-be-gone
1293 	 * file_priv.
1294 	 */
1295 	spin_lock(&file_priv->mm.lock);
1296 	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
1297 		request->file_priv = NULL;
1298 	spin_unlock(&file_priv->mm.lock);
1299 }
1300 
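/*
 * Called when a new drm file is opened: allocate the per-client state and
 * set up its GEM context state via i915_gem_context_open().
 */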
1301 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
1302 {
1303 	struct drm_i915_file_private *file_priv;
1304 	int ret;
1305 
1306 	DRM_DEBUG("\n");
1307 
1308 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1309 	if (!file_priv)
1310 		return -ENOMEM;
1311 
1312 	file->driver_priv = file_priv;
1313 	file_priv->dev_priv = i915;
1314 	file_priv->file = file;
1315 
1316 	spin_lock_init(&file_priv->mm.lock);
1317 	INIT_LIST_HEAD(&file_priv->mm.request_list);
1318 
1319 	file_priv->bsd_engine = -1;
1320 	file_priv->hang_timestamp = jiffies;
1321 
1322 	ret = i915_gem_context_open(i915, file);
1323 	if (ret)
1324 		kfree(file_priv);
1325 
1326 	return ret;
1327 }
1328 
1329 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1330 #include "selftests/mock_gem_device.c"
1331 #include "selftests/i915_gem.c"
1332 #endif
1333