xref: /openbmc/linux/drivers/gpu/drm/i915/i915_gem.c (revision 51b67a6e)
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27 
28 #include <drm/drm_vma_manager.h>
29 #include <drm/i915_drm.h>
30 #include <linux/dma-fence-array.h>
31 #include <linux/kthread.h>
32 #include <linux/dma-resv.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/slab.h>
35 #include <linux/stop_machine.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38 #include <linux/dma-buf.h>
39 #include <linux/mman.h>
40 
41 #include "display/intel_display.h"
42 #include "display/intel_frontbuffer.h"
43 
44 #include "gem/i915_gem_clflush.h"
45 #include "gem/i915_gem_context.h"
46 #include "gem/i915_gem_ioctls.h"
47 #include "gem/i915_gem_pm.h"
48 #include "gem/i915_gemfs.h"
49 #include "gt/intel_engine_user.h"
50 #include "gt/intel_gt.h"
51 #include "gt/intel_gt_pm.h"
52 #include "gt/intel_mocs.h"
53 #include "gt/intel_reset.h"
54 #include "gt/intel_renderstate.h"
55 #include "gt/intel_workarounds.h"
56 
57 #include "i915_drv.h"
58 #include "i915_scatterlist.h"
59 #include "i915_trace.h"
60 #include "i915_vgpu.h"
61 
62 #include "intel_pm.h"
63 
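/*
 * Reserve a drm_mm node in the CPU-mappable portion of the GGTT. The
 * pread/pwrite fallback paths below use a single page-sized node as a
 * sliding window, binding one object page at a time into it via
 * ggtt->vm.insert_page().
 */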
64 static int
65 insert_mappable_node(struct i915_ggtt *ggtt,
66                      struct drm_mm_node *node, u32 size)
67 {
68 	memset(node, 0, sizeof(*node));
69 	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
70 					   size, 0, I915_COLOR_UNEVICTABLE,
71 					   0, ggtt->mappable_end,
72 					   DRM_MM_INSERT_LOW);
73 }
74 
75 static void
76 remove_mappable_node(struct drm_mm_node *node)
77 {
78 	drm_mm_remove_node(node);
79 }
80 
81 int
82 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
83 			    struct drm_file *file)
84 {
85 	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
86 	struct drm_i915_gem_get_aperture *args = data;
87 	struct i915_vma *vma;
88 	u64 pinned;
89 
90 	mutex_lock(&ggtt->vm.mutex);
91 
92 	pinned = ggtt->vm.reserved;
93 	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
94 		if (i915_vma_is_pinned(vma))
95 			pinned += vma->node.size;
96 
97 	mutex_unlock(&ggtt->vm.mutex);
98 
99 	args->aper_size = ggtt->vm.total;
100 	args->aper_available_size = args->aper_size - pinned;
101 
102 	return 0;
103 }
104 
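/*
 * Unbind all VMAs of an object. Each vma is moved onto a private list so
 * that obj->vma.lock can be dropped around i915_vma_unbind(), which may
 * sleep; the list is then spliced back so the object keeps a complete vma
 * list even when a vma could not be unbound.
 */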
105 int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
106 			   unsigned long flags)
107 {
108 	struct i915_vma *vma;
109 	LIST_HEAD(still_in_list);
110 	int ret = 0;
111 
112 	lockdep_assert_held(&obj->base.dev->struct_mutex);
113 
114 	spin_lock(&obj->vma.lock);
115 	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
116 						       struct i915_vma,
117 						       obj_link))) {
118 		list_move_tail(&vma->obj_link, &still_in_list);
119 		spin_unlock(&obj->vma.lock);
120 
121 		ret = -EBUSY;
122 		if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
123 		    !i915_vma_is_active(vma))
124 			ret = i915_vma_unbind(vma);
125 
126 		spin_lock(&obj->vma.lock);
127 	}
128 	list_splice(&still_in_list, &obj->vma.list);
129 	spin_unlock(&obj->vma.lock);
130 
131 	return ret;
132 }
133 
134 static int
135 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
136 		     struct drm_i915_gem_pwrite *args,
137 		     struct drm_file *file)
138 {
139 	void *vaddr = obj->phys_handle->vaddr + args->offset;
140 	char __user *user_data = u64_to_user_ptr(args->data_ptr);
141 
142 	/*
143 	 * We manually control the domain here and pretend that it
144 	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
145 	 */
146 	intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
147 
148 	if (copy_from_user(vaddr, user_data, args->size))
149 		return -EFAULT;
150 
151 	drm_clflush_virt_range(vaddr, args->size);
152 	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
153 
154 	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
155 	return 0;
156 }
157 
158 static int
159 i915_gem_create(struct drm_file *file,
160 		struct drm_i915_private *dev_priv,
161 		u64 *size_p,
162 		u32 *handle_p)
163 {
164 	struct drm_i915_gem_object *obj;
165 	u32 handle;
166 	u64 size;
167 	int ret;
168 
169 	size = round_up(*size_p, PAGE_SIZE);
170 	if (size == 0)
171 		return -EINVAL;
172 
173 	/* Allocate the new object */
174 	obj = i915_gem_object_create_shmem(dev_priv, size);
175 	if (IS_ERR(obj))
176 		return PTR_ERR(obj);
177 
178 	ret = drm_gem_handle_create(file, &obj->base, &handle);
179 	/* drop reference from allocate - handle holds it now */
180 	i915_gem_object_put(obj);
181 	if (ret)
182 		return ret;
183 
184 	*handle_p = handle;
185 	*size_p = size;
186 	return 0;
187 }
188 
189 int
190 i915_gem_dumb_create(struct drm_file *file,
191 		     struct drm_device *dev,
192 		     struct drm_mode_create_dumb *args)
193 {
194 	int cpp = DIV_ROUND_UP(args->bpp, 8);
195 	u32 format;
196 
197 	switch (cpp) {
198 	case 1:
199 		format = DRM_FORMAT_C8;
200 		break;
201 	case 2:
202 		format = DRM_FORMAT_RGB565;
203 		break;
204 	case 4:
205 		format = DRM_FORMAT_XRGB8888;
206 		break;
207 	default:
208 		return -EINVAL;
209 	}
210 
211 	/* have to work out size/pitch and return them */
212 	args->pitch = ALIGN(args->width * cpp, 64);
213 
214 	/* align stride to page size so that we can remap */
215 	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
216 						    DRM_FORMAT_MOD_LINEAR))
217 		args->pitch = ALIGN(args->pitch, 4096);
218 
219 	args->size = args->pitch * args->height;
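	/*
	 * Example (illustrative): a 1920x1080 dumb buffer at 32 bpp gives
	 * cpp = 4 and pitch = ALIGN(1920 * 4, 64) = 7680; assuming that
	 * stride is within the linear plane limit, size = 7680 * 1080 =
	 * 8294400 bytes, which i915_gem_create() rounds up to whole pages.
	 */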
220 	return i915_gem_create(file, to_i915(dev),
221 			       &args->size, &args->handle);
222 }
223 
224 /**
225  * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
226  * @dev: drm device pointer
227  * @data: ioctl data blob
228  * @file: drm file pointer
229  */
230 int
231 i915_gem_create_ioctl(struct drm_device *dev, void *data,
232 		      struct drm_file *file)
233 {
234 	struct drm_i915_private *dev_priv = to_i915(dev);
235 	struct drm_i915_gem_create *args = data;
236 
237 	i915_gem_flush_free_objects(dev_priv);
238 
239 	return i915_gem_create(file, dev_priv,
240 			       &args->size, &args->handle);
241 }
242 
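/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading from the target if
 * needs_clflush is set.
 */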
243 static int
244 shmem_pread(struct page *page, int offset, int len, char __user *user_data,
245 	    bool needs_clflush)
246 {
247 	char *vaddr;
248 	int ret;
249 
250 	vaddr = kmap(page);
251 
252 	if (needs_clflush)
253 		drm_clflush_virt_range(vaddr + offset, len);
254 
255 	ret = __copy_to_user(user_data, vaddr + offset, len);
256 
257 	kunmap(page);
258 
259 	return ret ? -EFAULT : 0;
260 }
261 
262 static int
263 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
264 		     struct drm_i915_gem_pread *args)
265 {
266 	unsigned int needs_clflush;
267 	unsigned int idx, offset;
268 	struct dma_fence *fence;
269 	char __user *user_data;
270 	u64 remain;
271 	int ret;
272 
273 	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
274 	if (ret)
275 		return ret;
276 
277 	fence = i915_gem_object_lock_fence(obj);
278 	i915_gem_object_finish_access(obj);
279 	if (!fence)
280 		return -ENOMEM;
281 
282 	remain = args->size;
283 	user_data = u64_to_user_ptr(args->data_ptr);
284 	offset = offset_in_page(args->offset);
285 	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
286 		struct page *page = i915_gem_object_get_page(obj, idx);
287 		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
288 
289 		ret = shmem_pread(page, offset, length, user_data,
290 				  needs_clflush);
291 		if (ret)
292 			break;
293 
294 		remain -= length;
295 		user_data += length;
296 		offset = 0;
297 	}
298 
299 	i915_gem_object_unlock_fence(obj, fence);
300 	return ret;
301 }
302 
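/*
 * Copy from an io-mapped GGTT page into userspace. The atomic WC mapping is
 * tried first; __copy_to_user_inatomic() cannot fault in user pages, so if
 * it leaves bytes uncopied we fall back to a regular mapping and
 * copy_to_user(), which is allowed to take the fault.
 */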
303 static inline bool
304 gtt_user_read(struct io_mapping *mapping,
305 	      loff_t base, int offset,
306 	      char __user *user_data, int length)
307 {
308 	void __iomem *vaddr;
309 	unsigned long unwritten;
310 
311 	/* We can use the cpu mem copy function because this is X86. */
312 	vaddr = io_mapping_map_atomic_wc(mapping, base);
313 	unwritten = __copy_to_user_inatomic(user_data,
314 					    (void __force *)vaddr + offset,
315 					    length);
316 	io_mapping_unmap_atomic(vaddr);
317 	if (unwritten) {
318 		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
319 		unwritten = copy_to_user(user_data,
320 					 (void __force *)vaddr + offset,
321 					 length);
322 		io_mapping_unmap(vaddr);
323 	}
324 	return unwritten;
325 }
326 
327 static int
328 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
329 		   const struct drm_i915_gem_pread *args)
330 {
331 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
332 	struct i915_ggtt *ggtt = &i915->ggtt;
333 	intel_wakeref_t wakeref;
334 	struct drm_mm_node node;
335 	struct dma_fence *fence;
336 	void __user *user_data;
337 	struct i915_vma *vma;
338 	u64 remain, offset;
339 	int ret;
340 
341 	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
342 	if (ret)
343 		return ret;
344 
345 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
346 	vma = ERR_PTR(-ENODEV);
347 	if (!i915_gem_object_is_tiled(obj))
348 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
349 					       PIN_MAPPABLE |
350 					       PIN_NONBLOCK /* NOWARN */ |
351 					       PIN_NOEVICT);
352 	if (!IS_ERR(vma)) {
353 		node.start = i915_ggtt_offset(vma);
354 		node.allocated = false;
355 	} else {
356 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
357 		if (ret)
358 			goto out_unlock;
359 		GEM_BUG_ON(!node.allocated);
360 	}
361 
362 	mutex_unlock(&i915->drm.struct_mutex);
363 
364 	ret = i915_gem_object_lock_interruptible(obj);
365 	if (ret)
366 		goto out_unpin;
367 
368 	ret = i915_gem_object_set_to_gtt_domain(obj, false);
369 	if (ret) {
370 		i915_gem_object_unlock(obj);
371 		goto out_unpin;
372 	}
373 
374 	fence = i915_gem_object_lock_fence(obj);
375 	i915_gem_object_unlock(obj);
376 	if (!fence) {
377 		ret = -ENOMEM;
378 		goto out_unpin;
379 	}
380 
381 	user_data = u64_to_user_ptr(args->data_ptr);
382 	remain = args->size;
383 	offset = args->offset;
384 
385 	while (remain > 0) {
386 		/* Operation in this page
387 		 *
388 		 * page_base = page offset within aperture
389 		 * page_offset = offset within page
390 		 * page_length = bytes to copy for this page
391 		 */
392 		u32 page_base = node.start;
393 		unsigned page_offset = offset_in_page(offset);
394 		unsigned page_length = PAGE_SIZE - page_offset;
395 		page_length = remain < page_length ? remain : page_length;
396 		if (node.allocated) {
397 			ggtt->vm.insert_page(&ggtt->vm,
398 					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
399 					     node.start, I915_CACHE_NONE, 0);
400 		} else {
401 			page_base += offset & PAGE_MASK;
402 		}
403 
404 		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
405 				  user_data, page_length)) {
406 			ret = -EFAULT;
407 			break;
408 		}
409 
410 		remain -= page_length;
411 		user_data += page_length;
412 		offset += page_length;
413 	}
414 
415 	i915_gem_object_unlock_fence(obj, fence);
416 out_unpin:
417 	mutex_lock(&i915->drm.struct_mutex);
418 	if (node.allocated) {
419 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
420 		remove_mappable_node(&node);
421 	} else {
422 		i915_vma_unpin(vma);
423 	}
424 out_unlock:
425 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
426 	mutex_unlock(&i915->drm.struct_mutex);
427 
428 	return ret;
429 }
430 
431 /**
432  * i915_gem_pread_ioctl - Reads data from the object referenced by handle.
433  * @dev: drm device pointer
434  * @data: ioctl data blob
435  * @file: drm file pointer
436  *
437  * On error, the contents of *data are undefined.
438  */
439 int
440 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
441 		     struct drm_file *file)
442 {
443 	struct drm_i915_gem_pread *args = data;
444 	struct drm_i915_gem_object *obj;
445 	int ret;
446 
447 	if (args->size == 0)
448 		return 0;
449 
450 	if (!access_ok(u64_to_user_ptr(args->data_ptr),
451 		       args->size))
452 		return -EFAULT;
453 
454 	obj = i915_gem_object_lookup(file, args->handle);
455 	if (!obj)
456 		return -ENOENT;
457 
458 	/* Bounds check source.  */
459 	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
460 		ret = -EINVAL;
461 		goto out;
462 	}
463 
464 	trace_i915_gem_object_pread(obj, args->offset, args->size);
465 
466 	ret = i915_gem_object_wait(obj,
467 				   I915_WAIT_INTERRUPTIBLE,
468 				   MAX_SCHEDULE_TIMEOUT);
469 	if (ret)
470 		goto out;
471 
472 	ret = i915_gem_object_pin_pages(obj);
473 	if (ret)
474 		goto out;
475 
476 	ret = i915_gem_shmem_pread(obj, args);
477 	if (ret == -EFAULT || ret == -ENODEV)
478 		ret = i915_gem_gtt_pread(obj, args);
479 
480 	i915_gem_object_unpin_pages(obj);
481 out:
482 	i915_gem_object_put(obj);
483 	return ret;
484 }
485 
486 /* This is the fast write path which cannot handle
487  * page faults in the source data
488  */
489 
490 static inline bool
491 ggtt_write(struct io_mapping *mapping,
492 	   loff_t base, int offset,
493 	   char __user *user_data, int length)
494 {
495 	void __iomem *vaddr;
496 	unsigned long unwritten;
497 
498 	/* We can use the cpu mem copy function because this is X86. */
499 	vaddr = io_mapping_map_atomic_wc(mapping, base);
500 	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
501 						      user_data, length);
502 	io_mapping_unmap_atomic(vaddr);
503 	if (unwritten) {
504 		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
505 		unwritten = copy_from_user((void __force *)vaddr + offset,
506 					   user_data, length);
507 		io_mapping_unmap(vaddr);
508 	}
509 
510 	return unwritten;
511 }
512 
513 /**
514  * i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the data directly from the
515  * user into the GTT, uncached.
516  * @obj: i915 GEM object
517  * @args: pwrite arguments structure
518  */
519 static int
520 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
521 			 const struct drm_i915_gem_pwrite *args)
522 {
523 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
524 	struct i915_ggtt *ggtt = &i915->ggtt;
525 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
526 	intel_wakeref_t wakeref;
527 	struct drm_mm_node node;
528 	struct dma_fence *fence;
529 	struct i915_vma *vma;
530 	u64 remain, offset;
531 	void __user *user_data;
532 	int ret;
533 
534 	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
535 	if (ret)
536 		return ret;
537 
538 	if (i915_gem_object_has_struct_page(obj)) {
539 		/*
540 		 * Avoid waking the device up if we can fall back, as
541 		 * waking/resuming is very slow (worst-case 10-100 ms
542 		 * depending on PCI sleeps and our own resume time).
543 		 * This easily dwarfs any performance advantage from
544 		 * using the cache bypass of indirect GGTT access.
545 		 */
546 		wakeref = intel_runtime_pm_get_if_in_use(rpm);
547 		if (!wakeref) {
548 			ret = -EFAULT;
549 			goto out_unlock;
550 		}
551 	} else {
552 		/* No backing pages, no fallback, we must force GGTT access */
553 		wakeref = intel_runtime_pm_get(rpm);
554 	}
555 
556 	vma = ERR_PTR(-ENODEV);
557 	if (!i915_gem_object_is_tiled(obj))
558 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
559 					       PIN_MAPPABLE |
560 					       PIN_NONBLOCK /* NOWARN */ |
561 					       PIN_NOEVICT);
562 	if (!IS_ERR(vma)) {
563 		node.start = i915_ggtt_offset(vma);
564 		node.allocated = false;
565 	} else {
566 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
567 		if (ret)
568 			goto out_rpm;
569 		GEM_BUG_ON(!node.allocated);
570 	}
571 
572 	mutex_unlock(&i915->drm.struct_mutex);
573 
574 	ret = i915_gem_object_lock_interruptible(obj);
575 	if (ret)
576 		goto out_unpin;
577 
578 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
579 	if (ret) {
580 		i915_gem_object_unlock(obj);
581 		goto out_unpin;
582 	}
583 
584 	fence = i915_gem_object_lock_fence(obj);
585 	i915_gem_object_unlock(obj);
586 	if (!fence) {
587 		ret = -ENOMEM;
588 		goto out_unpin;
589 	}
590 
591 	intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
592 
593 	user_data = u64_to_user_ptr(args->data_ptr);
594 	offset = args->offset;
595 	remain = args->size;
596 	while (remain) {
597 		/* Operation in this page
598 		 *
599 		 * page_base = page offset within aperture
600 		 * page_offset = offset within page
601 		 * page_length = bytes to copy for this page
602 		 */
603 		u32 page_base = node.start;
604 		unsigned int page_offset = offset_in_page(offset);
605 		unsigned int page_length = PAGE_SIZE - page_offset;
606 		page_length = remain < page_length ? remain : page_length;
607 		if (node.allocated) {
608 			/* flush the write before we modify the GGTT */
609 			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
610 			ggtt->vm.insert_page(&ggtt->vm,
611 					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
612 					     node.start, I915_CACHE_NONE, 0);
613 			wmb(); /* flush modifications to the GGTT (insert_page) */
614 		} else {
615 			page_base += offset & PAGE_MASK;
616 		}
617 		/* If we get a fault while copying data, then (presumably) our
618 		 * source page isn't available.  Return the error and we'll
619 		 * retry in the slow path.
620 		 * If the object is not shmem backed, we retry with the
621 		 * path that handles page faults.
622 		 */
623 		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
624 			       user_data, page_length)) {
625 			ret = -EFAULT;
626 			break;
627 		}
628 
629 		remain -= page_length;
630 		user_data += page_length;
631 		offset += page_length;
632 	}
633 	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
634 
635 	i915_gem_object_unlock_fence(obj, fence);
636 out_unpin:
637 	mutex_lock(&i915->drm.struct_mutex);
638 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
639 	if (node.allocated) {
640 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
641 		remove_mappable_node(&node);
642 	} else {
643 		i915_vma_unpin(vma);
644 	}
645 out_rpm:
646 	intel_runtime_pm_put(rpm, wakeref);
647 out_unlock:
648 	mutex_unlock(&i915->drm.struct_mutex);
649 	return ret;
650 }
651 
652 /* Per-page copy function for the shmem pwrite fastpath.
653  * Flushes invalid cachelines before writing to the target if
654  * needs_clflush_before is set and flushes out any written cachelines after
655  * writing if needs_clflush is set.
656  */
657 static int
658 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
659 	     bool needs_clflush_before,
660 	     bool needs_clflush_after)
661 {
662 	char *vaddr;
663 	int ret;
664 
665 	vaddr = kmap(page);
666 
667 	if (needs_clflush_before)
668 		drm_clflush_virt_range(vaddr + offset, len);
669 
670 	ret = __copy_from_user(vaddr + offset, user_data, len);
671 	if (!ret && needs_clflush_after)
672 		drm_clflush_virt_range(vaddr + offset, len);
673 
674 	kunmap(page);
675 
676 	return ret ? -EFAULT : 0;
677 }
678 
679 static int
680 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
681 		      const struct drm_i915_gem_pwrite *args)
682 {
683 	unsigned int partial_cacheline_write;
684 	unsigned int needs_clflush;
685 	unsigned int offset, idx;
686 	struct dma_fence *fence;
687 	void __user *user_data;
688 	u64 remain;
689 	int ret;
690 
691 	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
692 	if (ret)
693 		return ret;
694 
695 	fence = i915_gem_object_lock_fence(obj);
696 	i915_gem_object_finish_access(obj);
697 	if (!fence)
698 		return -ENOMEM;
699 
700 	/* If we don't overwrite a cacheline completely we need to be
701 	 * careful to have up-to-date data by first clflushing. Don't
702 	 * overcomplicate things and flush the entire range being written.
703 	 */
704 	partial_cacheline_write = 0;
705 	if (needs_clflush & CLFLUSH_BEFORE)
706 		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
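	/*
	 * With the clflush size a power of two, (offset | length) masked by
	 * partial_cacheline_write is non-zero exactly when the start offset
	 * is not cacheline aligned or the length is not a multiple of the
	 * cacheline size, i.e. when the copy may leave a partially written
	 * cacheline that needs flushing before the write (see shmem_pwrite()).
	 */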
707 
708 	user_data = u64_to_user_ptr(args->data_ptr);
709 	remain = args->size;
710 	offset = offset_in_page(args->offset);
711 	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
712 		struct page *page = i915_gem_object_get_page(obj, idx);
713 		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
714 
715 		ret = shmem_pwrite(page, offset, length, user_data,
716 				   (offset | length) & partial_cacheline_write,
717 				   needs_clflush & CLFLUSH_AFTER);
718 		if (ret)
719 			break;
720 
721 		remain -= length;
722 		user_data += length;
723 		offset = 0;
724 	}
725 
726 	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
727 	i915_gem_object_unlock_fence(obj, fence);
728 
729 	return ret;
730 }
731 
732 /**
733  * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
734  * @dev: drm device
735  * @data: ioctl data blob
736  * @file: drm file
737  *
738  * On error, the contents of the buffer that were to be modified are undefined.
739  */
740 int
741 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
742 		      struct drm_file *file)
743 {
744 	struct drm_i915_gem_pwrite *args = data;
745 	struct drm_i915_gem_object *obj;
746 	int ret;
747 
748 	if (args->size == 0)
749 		return 0;
750 
751 	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
752 		return -EFAULT;
753 
754 	obj = i915_gem_object_lookup(file, args->handle);
755 	if (!obj)
756 		return -ENOENT;
757 
758 	/* Bounds check destination. */
759 	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
760 		ret = -EINVAL;
761 		goto err;
762 	}
763 
764 	/* Writes not allowed into this read-only object */
765 	if (i915_gem_object_is_readonly(obj)) {
766 		ret = -EINVAL;
767 		goto err;
768 	}
769 
770 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
771 
772 	ret = -ENODEV;
773 	if (obj->ops->pwrite)
774 		ret = obj->ops->pwrite(obj, args);
775 	if (ret != -ENODEV)
776 		goto err;
777 
778 	ret = i915_gem_object_wait(obj,
779 				   I915_WAIT_INTERRUPTIBLE |
780 				   I915_WAIT_ALL,
781 				   MAX_SCHEDULE_TIMEOUT);
782 	if (ret)
783 		goto err;
784 
785 	ret = i915_gem_object_pin_pages(obj);
786 	if (ret)
787 		goto err;
788 
789 	ret = -EFAULT;
790 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
791 	 * it would end up going through the fenced access, and we'll get
792 	 * different detiling behavior between reading and writing.
793 	 * pread/pwrite currently are reading and writing from the CPU
794 	 * perspective, requiring manual detiling by the client.
795 	 */
796 	if (!i915_gem_object_has_struct_page(obj) ||
797 	    cpu_write_needs_clflush(obj))
798 		/* Note that the gtt paths might fail with non-page-backed user
799 		 * pointers (e.g. gtt mappings when moving data between
800 		 * textures). Fall back to the shmem path in that case.
801 		 */
802 		ret = i915_gem_gtt_pwrite_fast(obj, args);
803 
804 	if (ret == -EFAULT || ret == -ENOSPC) {
805 		if (obj->phys_handle)
806 			ret = i915_gem_phys_pwrite(obj, args, file);
807 		else
808 			ret = i915_gem_shmem_pwrite(obj, args);
809 	}
810 
811 	i915_gem_object_unpin_pages(obj);
812 err:
813 	i915_gem_object_put(obj);
814 	return ret;
815 }
816 
817 /**
818  * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
819  * @dev: drm device
820  * @data: ioctl data blob
821  * @file: drm file
822  */
823 int
824 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
825 			 struct drm_file *file)
826 {
827 	struct drm_i915_gem_sw_finish *args = data;
828 	struct drm_i915_gem_object *obj;
829 
830 	obj = i915_gem_object_lookup(file, args->handle);
831 	if (!obj)
832 		return -ENOENT;
833 
834 	/*
835 	 * Proxy objects are barred from CPU access, so there is no
836 	 * need to ban sw_finish as it is a nop.
837 	 */
838 
839 	/* Pinned buffers may be scanout, so flush the cache */
840 	i915_gem_object_flush_if_display(obj);
841 	i915_gem_object_put(obj);
842 
843 	return 0;
844 }
845 
846 void i915_gem_runtime_suspend(struct drm_i915_private *i915)
847 {
848 	struct drm_i915_gem_object *obj, *on;
849 	int i;
850 
851 	/*
852 	 * Only called during RPM suspend. All users of the userfault_list
853 	 * must be holding an RPM wakeref to ensure that this cannot
854 	 * run concurrently with themselves (and use the struct_mutex for
855 	 * protection between themselves).
856 	 */
857 
858 	list_for_each_entry_safe(obj, on,
859 				 &i915->ggtt.userfault_list, userfault_link)
860 		__i915_gem_object_release_mmap(obj);
861 
862 	/*
863 	 * The fences will be lost when the device powers down. If any were
864 	 * in use by hardware (i.e. they are pinned), we should not be powering
865 	 * down! All other fences will be reacquired by the user upon waking.
866 	 */
867 	for (i = 0; i < i915->ggtt.num_fences; i++) {
868 		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
869 
870 		/*
871 		 * Ideally we want to assert that the fence register is not
872 		 * live at this point (i.e. that no piece of code will be
873 		 * trying to write through fence + GTT, as that both violates
874 		 * our tracking of activity and associated locking/barriers,
875 		 * but also is illegal given that the hw is powered down).
876 		 *
877 		 * Previously we used reg->pin_count as a "liveness" indicator.
878 		 * That is not sufficient, and we need a more fine-grained
879 		 * tool if we want to have a sanity check here.
880 		 */
881 
882 		if (!reg->vma)
883 			continue;
884 
885 		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
886 		reg->dirty = true;
887 	}
888 }
889 
890 static long
891 wait_for_timelines(struct drm_i915_private *i915,
892 		   unsigned int wait, long timeout)
893 {
894 	struct intel_gt_timelines *timelines = &i915->gt.timelines;
895 	struct intel_timeline *tl;
896 	unsigned long flags;
897 
898 	spin_lock_irqsave(&timelines->lock, flags);
899 	list_for_each_entry(tl, &timelines->active_list, link) {
900 		struct i915_request *rq;
901 
902 		rq = i915_active_request_get_unlocked(&tl->last_request);
903 		if (!rq)
904 			continue;
905 
906 		spin_unlock_irqrestore(&timelines->lock, flags);
907 
908 		/*
909 		 * "Race-to-idle".
910 		 *
911 		 * Switching to the kernel context is often used as a synchronous
912 		 * step prior to idling, e.g. in suspend for flushing all
913 		 * current operations to memory before sleeping. These we
914 		 * want to complete as quickly as possible to avoid prolonged
915 		 * stalls, so allow the gpu to boost to maximum clocks.
916 		 */
917 		if (wait & I915_WAIT_FOR_IDLE_BOOST)
918 			gen6_rps_boost(rq);
919 
920 		timeout = i915_request_wait(rq, wait, timeout);
921 		i915_request_put(rq);
922 		if (timeout < 0)
923 			return timeout;
924 
925 		/* restart after reacquiring the lock */
926 		spin_lock_irqsave(&timelines->lock, flags);
927 		tl = list_entry(&timelines->active_list, typeof(*tl), link);
928 	}
929 	spin_unlock_irqrestore(&timelines->lock, flags);
930 
931 	return timeout;
932 }
933 
934 int i915_gem_wait_for_idle(struct drm_i915_private *i915,
935 			   unsigned int flags, long timeout)
936 {
937 	/* If the device is asleep, we have no requests outstanding */
938 	if (!intel_gt_pm_is_awake(&i915->gt))
939 		return 0;
940 
941 	GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
942 		  flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
943 		  timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
944 
945 	timeout = wait_for_timelines(i915, flags, timeout);
946 	if (timeout < 0)
947 		return timeout;
948 
949 	if (flags & I915_WAIT_LOCKED) {
950 		lockdep_assert_held(&i915->drm.struct_mutex);
951 
952 		i915_retire_requests(i915);
953 	}
954 
955 	return 0;
956 }
957 
958 struct i915_vma *
959 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
960 			 const struct i915_ggtt_view *view,
961 			 u64 size,
962 			 u64 alignment,
963 			 u64 flags)
964 {
965 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
966 	struct i915_address_space *vm = &dev_priv->ggtt.vm;
967 	struct i915_vma *vma;
968 	int ret;
969 
970 	lockdep_assert_held(&obj->base.dev->struct_mutex);
971 
972 	if (flags & PIN_MAPPABLE &&
973 	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
974 		/* If the required space is larger than the available
975 		 * aperture, we will not be able to find a slot for the
976 		 * object and unbinding the object now will be in
977 		 * vain. Worse, doing so may cause us to ping-pong
978 		 * the object in and out of the Global GTT and
979 		 * waste a lot of cycles under the mutex.
980 		 */
981 		if (obj->base.size > dev_priv->ggtt.mappable_end)
982 			return ERR_PTR(-E2BIG);
983 
984 		/* If NONBLOCK is set the caller is optimistically
985 		 * trying to cache the full object within the mappable
986 		 * aperture, and *must* have a fallback in place for
987 		 * situations where we cannot bind the object. We
988 		 * can be a little more lax here and use the fallback
989 		 * more often to avoid costly migrations of ourselves
990 		 * and other objects within the aperture.
991 		 *
992 		 * Half-the-aperture is used as a simple heuristic.
993 		 * More interesting would be to search for a free
994 		 * block prior to making the commitment to unbind.
995 		 * That caters for the self-harm case, and with a
996 		 * little more heuristics (e.g. NOFAULT, NOEVICT)
997 		 * we could try to minimise harm to others.
998 		 */
999 		if (flags & PIN_NONBLOCK &&
1000 		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
1001 			return ERR_PTR(-ENOSPC);
1002 	}
1003 
1004 	vma = i915_vma_instance(obj, vm, view);
1005 	if (IS_ERR(vma))
1006 		return vma;
1007 
1008 	if (i915_vma_misplaced(vma, size, alignment, flags)) {
1009 		if (flags & PIN_NONBLOCK) {
1010 			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
1011 				return ERR_PTR(-ENOSPC);
1012 
1013 			if (flags & PIN_MAPPABLE &&
1014 			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
1015 				return ERR_PTR(-ENOSPC);
1016 		}
1017 
1018 		WARN(i915_vma_is_pinned(vma),
1019 		     "bo is already pinned in ggtt with incorrect alignment:"
1020 		     " offset=%08x, req.alignment=%llx,"
1021 		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
1022 		     i915_ggtt_offset(vma), alignment,
1023 		     !!(flags & PIN_MAPPABLE),
1024 		     i915_vma_is_map_and_fenceable(vma));
1025 		ret = i915_vma_unbind(vma);
1026 		if (ret)
1027 			return ERR_PTR(ret);
1028 	}
1029 
1030 	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
1031 		mutex_lock(&vma->vm->mutex);
1032 		ret = i915_vma_revoke_fence(vma);
1033 		mutex_unlock(&vma->vm->mutex);
1034 		if (ret)
1035 			return ERR_PTR(ret);
1036 	}
1037 
1038 	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
1039 	if (ret)
1040 		return ERR_PTR(ret);
1041 
1042 	return vma;
1043 }
1044 
1045 int
1046 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1047 		       struct drm_file *file_priv)
1048 {
1049 	struct drm_i915_private *i915 = to_i915(dev);
1050 	struct drm_i915_gem_madvise *args = data;
1051 	struct drm_i915_gem_object *obj;
1052 	int err;
1053 
1054 	switch (args->madv) {
1055 	case I915_MADV_DONTNEED:
1056 	case I915_MADV_WILLNEED:
1057 	    break;
1058 	default:
1059 	    return -EINVAL;
1060 	}
1061 
1062 	obj = i915_gem_object_lookup(file_priv, args->handle);
1063 	if (!obj)
1064 		return -ENOENT;
1065 
1066 	err = mutex_lock_interruptible(&obj->mm.lock);
1067 	if (err)
1068 		goto out;
1069 
1070 	if (i915_gem_object_has_pages(obj) &&
1071 	    i915_gem_object_is_tiled(obj) &&
1072 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
1073 		if (obj->mm.madv == I915_MADV_WILLNEED) {
1074 			GEM_BUG_ON(!obj->mm.quirked);
1075 			__i915_gem_object_unpin_pages(obj);
1076 			obj->mm.quirked = false;
1077 		}
1078 		if (args->madv == I915_MADV_WILLNEED) {
1079 			GEM_BUG_ON(obj->mm.quirked);
1080 			__i915_gem_object_pin_pages(obj);
1081 			obj->mm.quirked = true;
1082 		}
1083 	}
1084 
1085 	if (obj->mm.madv != __I915_MADV_PURGED)
1086 		obj->mm.madv = args->madv;
1087 
1088 	if (i915_gem_object_has_pages(obj)) {
1089 		struct list_head *list;
1090 
1091 		if (i915_gem_object_is_shrinkable(obj)) {
1092 			unsigned long flags;
1093 
1094 			spin_lock_irqsave(&i915->mm.obj_lock, flags);
1095 
1096 			if (obj->mm.madv != I915_MADV_WILLNEED)
1097 				list = &i915->mm.purge_list;
1098 			else
1099 				list = &i915->mm.shrink_list;
1100 			list_move_tail(&obj->mm.link, list);
1101 
1102 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
1103 		}
1104 	}
1105 
1106 	/* if the object is no longer attached, discard its backing storage */
1107 	if (obj->mm.madv == I915_MADV_DONTNEED &&
1108 	    !i915_gem_object_has_pages(obj))
1109 		i915_gem_object_truncate(obj);
1110 
1111 	args->retained = obj->mm.madv != __I915_MADV_PURGED;
1112 	mutex_unlock(&obj->mm.lock);
1113 
1114 out:
1115 	i915_gem_object_put(obj);
1116 	return err;
1117 }
1118 
1119 void i915_gem_sanitize(struct drm_i915_private *i915)
1120 {
1121 	intel_wakeref_t wakeref;
1122 
1123 	GEM_TRACE("\n");
1124 
1125 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1126 	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
1127 
1128 	/*
1129 	 * As we have just resumed the machine and woken the device up from
1130 	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
1131 	 * back to defaults, recovering from whatever wedged state we left it
1132 	 * in and so worth trying to use the device once more.
1133 	 */
1134 	if (intel_gt_is_wedged(&i915->gt))
1135 		intel_gt_unset_wedged(&i915->gt);
1136 
1137 	/*
1138 	 * If we inherit context state from the BIOS or earlier occupants
1139 	 * of the GPU, the GPU may be in an inconsistent state when we
1140 	 * try to take over. The only way to remove the earlier state
1141 	 * is by resetting. However, resetting on earlier gen is tricky as
1142 	 * it may impact the display and we are uncertain about the stability
1143 	 * of the reset, so this could be applied to even earlier gen.
1144 	 */
1145 	intel_gt_sanitize(&i915->gt, false);
1146 
1147 	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
1148 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1149 }
1150 
1151 static void init_unused_ring(struct intel_gt *gt, u32 base)
1152 {
1153 	struct intel_uncore *uncore = gt->uncore;
1154 
1155 	intel_uncore_write(uncore, RING_CTL(base), 0);
1156 	intel_uncore_write(uncore, RING_HEAD(base), 0);
1157 	intel_uncore_write(uncore, RING_TAIL(base), 0);
1158 	intel_uncore_write(uncore, RING_START(base), 0);
1159 }
1160 
1161 static void init_unused_rings(struct intel_gt *gt)
1162 {
1163 	struct drm_i915_private *i915 = gt->i915;
1164 
1165 	if (IS_I830(i915)) {
1166 		init_unused_ring(gt, PRB1_BASE);
1167 		init_unused_ring(gt, SRB0_BASE);
1168 		init_unused_ring(gt, SRB1_BASE);
1169 		init_unused_ring(gt, SRB2_BASE);
1170 		init_unused_ring(gt, SRB3_BASE);
1171 	} else if (IS_GEN(i915, 2)) {
1172 		init_unused_ring(gt, SRB0_BASE);
1173 		init_unused_ring(gt, SRB1_BASE);
1174 	} else if (IS_GEN(i915, 3)) {
1175 		init_unused_ring(gt, PRB1_BASE);
1176 		init_unused_ring(gt, PRB2_BASE);
1177 	}
1178 }
1179 
1180 int i915_gem_init_hw(struct drm_i915_private *i915)
1181 {
1182 	struct intel_uncore *uncore = &i915->uncore;
1183 	struct intel_gt *gt = &i915->gt;
1184 	int ret;
1185 
1186 	BUG_ON(!i915->kernel_context);
1187 	ret = intel_gt_terminally_wedged(gt);
1188 	if (ret)
1189 		return ret;
1190 
1191 	gt->last_init_time = ktime_get();
1192 
1193 	/* Double layer security blanket, see i915_gem_init() */
1194 	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
1195 
1196 	if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
1197 		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
1198 
1199 	if (IS_HASWELL(i915))
1200 		intel_uncore_write(uncore,
1201 				   MI_PREDICATE_RESULT_2,
1202 				   IS_HSW_GT3(i915) ?
1203 				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
1204 
1205 	/* Apply the GT workarounds... */
1206 	intel_gt_apply_workarounds(gt);
1207 	/* ...and determine whether they are sticking. */
1208 	intel_gt_verify_workarounds(gt, "init");
1209 
1210 	intel_gt_init_swizzling(gt);
1211 
1212 	/*
1213 	 * At least 830 can leave some of the unused rings
1214 	 * "active" (ie. head != tail) after resume which
1215 	 * will prevent c3 entry. Makes sure all unused rings
1216 	 * are totally idle.
1217 	 */
1218 	init_unused_rings(gt);
1219 
1220 	ret = i915_ppgtt_init_hw(gt);
1221 	if (ret) {
1222 		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
1223 		goto out;
1224 	}
1225 
1226 	/* We can't enable contexts until all firmware is loaded */
1227 	ret = intel_uc_init_hw(&gt->uc);
1228 	if (ret) {
1229 		i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
1230 		goto out;
1231 	}
1232 
1233 	intel_mocs_init(gt);
1234 
1235 out:
1236 	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
1237 	return ret;
1238 }
1239 
1240 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
1241 {
1242 	struct i915_request *requests[I915_NUM_ENGINES] = {};
1243 	struct intel_engine_cs *engine;
1244 	enum intel_engine_id id;
1245 	int err = 0;
1246 
1247 	/*
1248 	 * As we reset the gpu during very early sanitisation, the current
1249 	 * register state on the GPU should reflect its default values.
1250 	 * We load a context onto the hw (with restore-inhibit), then switch
1251 	 * over to a second context to save that default register state. We
1252 	 * can then prime every new context with that state so they all start
1253 	 * from the same default HW values.
1254 	 */
1255 
1256 	for_each_engine(engine, i915, id) {
1257 		struct intel_context *ce;
1258 		struct i915_request *rq;
1259 
1260 		/* We must be able to switch to something! */
1261 		GEM_BUG_ON(!engine->kernel_context);
1262 		engine->serial++; /* force the kernel context switch */
1263 
1264 		ce = intel_context_create(i915->kernel_context, engine);
1265 		if (IS_ERR(ce)) {
1266 			err = PTR_ERR(ce);
1267 			goto out;
1268 		}
1269 
1270 		rq = intel_context_create_request(ce);
1271 		if (IS_ERR(rq)) {
1272 			err = PTR_ERR(rq);
1273 			intel_context_put(ce);
1274 			goto out;
1275 		}
1276 
1277 		err = intel_engine_emit_ctx_wa(rq);
1278 		if (err)
1279 			goto err_rq;
1280 
1281 		/*
1282 		 * Failing to program the MOCS is non-fatal. The system will not
1283 		 * run at peak performance. So warn the user and carry on.
1284 		 */
1285 		err = intel_mocs_emit(rq);
1286 		if (err)
1287 			dev_notice(i915->drm.dev,
1288 				   "Failed to program MOCS registers; expect performance issues.\n");
1289 
1290 		err = intel_renderstate_emit(rq);
1291 		if (err)
1292 			goto err_rq;
1293 
1294 err_rq:
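		/*
		 * The request was created on this context regardless of any
		 * error above, so it is added (and a reference kept) here;
		 * failures are then handled in the common cleanup path after
		 * the request has been committed.
		 */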
1295 		requests[id] = i915_request_get(rq);
1296 		i915_request_add(rq);
1297 		if (err)
1298 			goto out;
1299 	}
1300 
1301 	/* Flush the default context image to memory, and enable powersaving. */
1302 	if (!i915_gem_load_power_context(i915)) {
1303 		err = -EIO;
1304 		goto out;
1305 	}
1306 
1307 	for (id = 0; id < ARRAY_SIZE(requests); id++) {
1308 		struct i915_request *rq;
1309 		struct i915_vma *state;
1310 		void *vaddr;
1311 
1312 		rq = requests[id];
1313 		if (!rq)
1314 			continue;
1315 
1316 		/* We want to be able to unbind the state from the GGTT */
1317 		GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
1318 
1319 		state = rq->hw_context->state;
1320 		if (!state)
1321 			continue;
1322 
1323 		/*
1324 		 * As we will hold a reference to the logical state, it will
1325 		 * not be torn down with the context, and importantly the
1326 		 * object will hold onto its vma (making it possible for a
1327 		 * stray GTT write to corrupt our defaults). Unmap the vma
1328 		 * from the GTT to prevent such accidents and reclaim the
1329 		 * space.
1330 		 */
1331 		err = i915_vma_unbind(state);
1332 		if (err)
1333 			goto out;
1334 
1335 		i915_gem_object_lock(state->obj);
1336 		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
1337 		i915_gem_object_unlock(state->obj);
1338 		if (err)
1339 			goto out;
1340 
1341 		i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
1342 
1343 		/* Check we can acquire the image of the context state */
1344 		vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
1345 		if (IS_ERR(vaddr)) {
1346 			err = PTR_ERR(vaddr);
1347 			goto out;
1348 		}
1349 
1350 		rq->engine->default_state = i915_gem_object_get(state->obj);
1351 		i915_gem_object_unpin_map(state->obj);
1352 	}
1353 
1354 out:
1355 	/*
1356 	 * If we have to abandon now, we expect the engines to be idle
1357 	 * and ready to be torn-down. The quickest way we can accomplish
1358 	 * this is by declaring ourselves wedged.
1359 	 */
1360 	if (err)
1361 		intel_gt_set_wedged(&i915->gt);
1362 
1363 	for (id = 0; id < ARRAY_SIZE(requests); id++) {
1364 		struct intel_context *ce;
1365 		struct i915_request *rq;
1366 
1367 		rq = requests[id];
1368 		if (!rq)
1369 			continue;
1370 
1371 		ce = rq->hw_context;
1372 		i915_request_put(rq);
1373 		intel_context_put(ce);
1374 	}
1375 	return err;
1376 }
1377 
1378 static int
1379 i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
1380 {
1381 	return intel_gt_init_scratch(&i915->gt, size);
1382 }
1383 
1384 static void i915_gem_fini_scratch(struct drm_i915_private *i915)
1385 {
1386 	intel_gt_fini_scratch(&i915->gt);
1387 }
1388 
1389 static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
1390 {
1391 	struct intel_engine_cs *engine;
1392 	enum intel_engine_id id;
1393 	int err = 0;
1394 
1395 	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1396 		return 0;
1397 
1398 	for_each_engine(engine, i915, id) {
1399 		if (intel_engine_verify_workarounds(engine, "load"))
1400 			err = -EIO;
1401 	}
1402 
1403 	return err;
1404 }
1405 
1406 int i915_gem_init(struct drm_i915_private *dev_priv)
1407 {
1408 	int ret;
1409 
1410 	/* We need to fall back to 4K pages if the host doesn't support huge gtt. */
1411 	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
1412 		mkwrite_device_info(dev_priv)->page_sizes =
1413 			I915_GTT_PAGE_SIZE_4K;
1414 
1415 	intel_timelines_init(dev_priv);
1416 
1417 	ret = i915_gem_init_userptr(dev_priv);
1418 	if (ret)
1419 		return ret;
1420 
1421 	intel_uc_fetch_firmwares(&dev_priv->gt.uc);
1422 	intel_wopcm_init(&dev_priv->wopcm);
1423 
1424 	/* This is just a security blanket to placate dragons.
1425 	 * On some systems, we very sporadically observe that the first TLBs
1426 	 * used by the CS may be stale, despite us poking the TLB reset. If
1427 	 * we hold the forcewake during initialisation these problems
1428 	 * just magically go away.
1429 	 */
1430 	mutex_lock(&dev_priv->drm.struct_mutex);
1431 	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1432 
1433 	ret = i915_init_ggtt(dev_priv);
1434 	if (ret) {
1435 		GEM_BUG_ON(ret == -EIO);
1436 		goto err_unlock;
1437 	}
1438 
1439 	ret = i915_gem_init_scratch(dev_priv,
1440 				    IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
1441 	if (ret) {
1442 		GEM_BUG_ON(ret == -EIO);
1443 		goto err_ggtt;
1444 	}
1445 
1446 	ret = intel_engines_setup(dev_priv);
1447 	if (ret) {
1448 		GEM_BUG_ON(ret == -EIO);
1449 		goto err_unlock;
1450 	}
1451 
1452 	ret = i915_gem_contexts_init(dev_priv);
1453 	if (ret) {
1454 		GEM_BUG_ON(ret == -EIO);
1455 		goto err_scratch;
1456 	}
1457 
1458 	ret = intel_engines_init(dev_priv);
1459 	if (ret) {
1460 		GEM_BUG_ON(ret == -EIO);
1461 		goto err_context;
1462 	}
1463 
1464 	intel_init_gt_powersave(dev_priv);
1465 
1466 	intel_uc_init(&dev_priv->gt.uc);
1467 
1468 	ret = i915_gem_init_hw(dev_priv);
1469 	if (ret)
1470 		goto err_uc_init;
1471 
1472 	/* Only when the HW is re-initialised, can we replay the requests */
1473 	ret = intel_gt_resume(&dev_priv->gt);
1474 	if (ret)
1475 		goto err_init_hw;
1476 
1477 	/*
1478 	 * Despite its name, intel_init_clock_gating applies both display
1479 	 * clock gating workarounds and GT mmio workarounds, plus the occasional
1480 	 * GT power context workaround. Worse, sometimes it includes a context
1481 	 * register workaround which we need to apply before we record the
1482 	 * default HW state for all contexts.
1483 	 *
1484 	 * FIXME: break up the workarounds and apply them at the right time!
1485 	 */
1486 	intel_init_clock_gating(dev_priv);
1487 
1488 	ret = intel_engines_verify_workarounds(dev_priv);
1489 	if (ret)
1490 		goto err_gt;
1491 
1492 	ret = __intel_engines_record_defaults(dev_priv);
1493 	if (ret)
1494 		goto err_gt;
1495 
1496 	ret = i915_inject_load_error(dev_priv, -ENODEV);
1497 	if (ret)
1498 		goto err_gt;
1499 
1500 	ret = i915_inject_load_error(dev_priv, -EIO);
1501 	if (ret)
1502 		goto err_gt;
1503 
1504 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1505 	mutex_unlock(&dev_priv->drm.struct_mutex);
1506 
1507 	return 0;
1508 
1509 	/*
1510 	 * Unwinding is complicated by the fact that we want to handle -EIO to mean
1511 	 * disable GPU submission but keep KMS alive. We want to mark the
1512 	 * HW as irreversibly wedged, but keep enough state around that the
1513 	 * driver doesn't explode during runtime.
1514 	 */
1515 err_gt:
1516 	mutex_unlock(&dev_priv->drm.struct_mutex);
1517 
1518 	intel_gt_set_wedged(&dev_priv->gt);
1519 	i915_gem_suspend(dev_priv);
1520 	i915_gem_suspend_late(dev_priv);
1521 
1522 	i915_gem_drain_workqueue(dev_priv);
1523 
1524 	mutex_lock(&dev_priv->drm.struct_mutex);
1525 err_init_hw:
1526 	intel_uc_fini_hw(&dev_priv->gt.uc);
1527 err_uc_init:
1528 	if (ret != -EIO) {
1529 		intel_uc_fini(&dev_priv->gt.uc);
1530 		intel_cleanup_gt_powersave(dev_priv);
1531 		intel_engines_cleanup(dev_priv);
1532 	}
1533 err_context:
1534 	if (ret != -EIO)
1535 		i915_gem_contexts_fini(dev_priv);
1536 err_scratch:
1537 	i915_gem_fini_scratch(dev_priv);
1538 err_ggtt:
1539 err_unlock:
1540 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1541 	mutex_unlock(&dev_priv->drm.struct_mutex);
1542 
1543 	if (ret != -EIO) {
1544 		intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
1545 		i915_gem_cleanup_userptr(dev_priv);
1546 		intel_timelines_fini(dev_priv);
1547 	}
1548 
1549 	if (ret == -EIO) {
1550 		mutex_lock(&dev_priv->drm.struct_mutex);
1551 
1552 		/*
1553 		 * Allow engines or uC initialisation to fail by marking the GPU
1554 		 * as wedged. But we only want to do this when the GPU is angry,
1555 		 * for all other failures, such as an allocation failure, bail.
1556 		 */
1557 		if (!intel_gt_is_wedged(&dev_priv->gt)) {
1558 			i915_probe_error(dev_priv,
1559 					 "Failed to initialize GPU, declaring it wedged!\n");
1560 			intel_gt_set_wedged(&dev_priv->gt);
1561 		}
1562 
1563 		/* Minimal basic recovery for KMS */
1564 		ret = i915_ggtt_enable_hw(dev_priv);
1565 		i915_gem_restore_gtt_mappings(dev_priv);
1566 		i915_gem_restore_fences(dev_priv);
1567 		intel_init_clock_gating(dev_priv);
1568 
1569 		mutex_unlock(&dev_priv->drm.struct_mutex);
1570 	}
1571 
1572 	i915_gem_drain_freed_objects(dev_priv);
1573 	return ret;
1574 }
1575 
1576 void i915_gem_driver_register(struct drm_i915_private *i915)
1577 {
1578 	i915_gem_driver_register__shrinker(i915);
1579 
1580 	intel_engines_driver_register(i915);
1581 }
1582 
1583 void i915_gem_driver_unregister(struct drm_i915_private *i915)
1584 {
1585 	i915_gem_driver_unregister__shrinker(i915);
1586 }
1587 
1588 void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
1589 {
1590 	GEM_BUG_ON(dev_priv->gt.awake);
1591 
1592 	intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
1593 
1594 	i915_gem_suspend_late(dev_priv);
1595 	intel_disable_gt_powersave(dev_priv);
1596 
1597 	/* Flush any outstanding unpin_work. */
1598 	i915_gem_drain_workqueue(dev_priv);
1599 
1600 	mutex_lock(&dev_priv->drm.struct_mutex);
1601 	intel_uc_fini_hw(&dev_priv->gt.uc);
1602 	intel_uc_fini(&dev_priv->gt.uc);
1603 	mutex_unlock(&dev_priv->drm.struct_mutex);
1604 
1605 	i915_gem_drain_freed_objects(dev_priv);
1606 }
1607 
1608 void i915_gem_driver_release(struct drm_i915_private *dev_priv)
1609 {
1610 	mutex_lock(&dev_priv->drm.struct_mutex);
1611 	intel_engines_cleanup(dev_priv);
1612 	i915_gem_contexts_fini(dev_priv);
1613 	i915_gem_fini_scratch(dev_priv);
1614 	mutex_unlock(&dev_priv->drm.struct_mutex);
1615 
1616 	intel_wa_list_free(&dev_priv->gt_wa_list);
1617 
1618 	intel_cleanup_gt_powersave(dev_priv);
1619 
1620 	intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
1621 	i915_gem_cleanup_userptr(dev_priv);
1622 	intel_timelines_fini(dev_priv);
1623 
1624 	i915_gem_drain_freed_objects(dev_priv);
1625 
1626 	WARN_ON(!list_empty(&dev_priv->contexts.list));
1627 }
1628 
1629 void i915_gem_init_mmio(struct drm_i915_private *i915)
1630 {
1631 	i915_gem_sanitize(i915);
1632 }
1633 
1634 static void i915_gem_init__mm(struct drm_i915_private *i915)
1635 {
1636 	spin_lock_init(&i915->mm.obj_lock);
1637 
1638 	init_llist_head(&i915->mm.free_list);
1639 
1640 	INIT_LIST_HEAD(&i915->mm.purge_list);
1641 	INIT_LIST_HEAD(&i915->mm.shrink_list);
1642 
1643 	i915_gem_init__objects(i915);
1644 }
1645 
1646 int i915_gem_init_early(struct drm_i915_private *dev_priv)
1647 {
1648 	int err;
1649 
1650 	i915_gem_init__mm(dev_priv);
1651 	i915_gem_init__pm(dev_priv);
1652 
1653 	spin_lock_init(&dev_priv->fb_tracking.lock);
1654 
1655 	err = i915_gemfs_init(dev_priv);
1656 	if (err)
1657 		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
1658 
1659 	return 0;
1660 }
1661 
1662 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
1663 {
1664 	i915_gem_drain_freed_objects(dev_priv);
1665 	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
1666 	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
1667 	WARN_ON(dev_priv->mm.shrink_count);
1668 
1669 	i915_gemfs_fini(dev_priv);
1670 }
1671 
1672 int i915_gem_freeze(struct drm_i915_private *dev_priv)
1673 {
1674 	/* Discard all purgeable objects, let userspace recover those as
1675 	 * required after resuming.
1676 	 */
1677 	i915_gem_shrink_all(dev_priv);
1678 
1679 	return 0;
1680 }
1681 
1682 int i915_gem_freeze_late(struct drm_i915_private *i915)
1683 {
1684 	struct drm_i915_gem_object *obj;
1685 	intel_wakeref_t wakeref;
1686 
1687 	/*
1688 	 * Called just before we write the hibernation image.
1689 	 *
1690 	 * We need to update the domain tracking to reflect that the CPU
1691 	 * will be accessing all the pages to create and restore from the
1692 	 * hibernation, and so upon restoration those pages will be in the
1693 	 * CPU domain.
1694 	 *
1695 	 * To make sure the hibernation image contains the latest state,
1696 	 * we update that state just before writing out the image.
1697 	 *
1698 	 * To try and reduce the hibernation image, we manually shrink
1699 	 * the objects as well, see i915_gem_freeze()
1700 	 */
1701 
1702 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1703 
1704 	i915_gem_shrink(i915, -1UL, NULL, ~0);
1705 	i915_gem_drain_freed_objects(i915);
1706 
1707 	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
1708 		i915_gem_object_lock(obj);
1709 		WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
1710 		i915_gem_object_unlock(obj);
1711 	}
1712 
1713 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1714 
1715 	return 0;
1716 }
1717 
1718 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
1719 {
1720 	struct drm_i915_file_private *file_priv = file->driver_priv;
1721 	struct i915_request *request;
1722 
1723 	/* Clean up our request list when the client is going away, so that
1724 	 * later retire_requests won't dereference our soon-to-be-gone
1725 	 * file_priv.
1726 	 */
1727 	spin_lock(&file_priv->mm.lock);
1728 	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
1729 		request->file_priv = NULL;
1730 	spin_unlock(&file_priv->mm.lock);
1731 }
1732 
1733 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
1734 {
1735 	struct drm_i915_file_private *file_priv;
1736 	int ret;
1737 
1738 	DRM_DEBUG("\n");
1739 
1740 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1741 	if (!file_priv)
1742 		return -ENOMEM;
1743 
1744 	file->driver_priv = file_priv;
1745 	file_priv->dev_priv = i915;
1746 	file_priv->file = file;
1747 
1748 	spin_lock_init(&file_priv->mm.lock);
1749 	INIT_LIST_HEAD(&file_priv->mm.request_list);
1750 
1751 	file_priv->bsd_engine = -1;
1752 	file_priv->hang_timestamp = jiffies;
1753 
1754 	ret = i915_gem_context_open(i915, file);
1755 	if (ret)
1756 		kfree(file_priv);
1757 
1758 	return ret;
1759 }
1760 
1761 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1762 #include "selftests/mock_gem_device.c"
1763 #include "selftests/i915_gem.c"
1764 #endif
1765