xref: /openbmc/linux/drivers/gpu/drm/i915/i915_gem.c (revision 95b384f9)
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27 
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_vgpu.h"
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35 #include "intel_mocs.h"
36 #include <linux/shmem_fs.h>
37 #include <linux/slab.h>
38 #include <linux/swap.h>
39 #include <linux/pci.h>
40 #include <linux/dma-buf.h>
41 
42 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
43 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
44 static void
45 i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
46 static void
47 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
48 
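/* Returns true when CPU accesses to the object are cache-coherent with the
 * GPU: either the platform shares its last-level cache with the GPU, or the
 * object uses a cache level other than I915_CACHE_NONE.
 */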
49 static bool cpu_cache_is_coherent(struct drm_device *dev,
50 				  enum i915_cache_level level)
51 {
52 	return HAS_LLC(dev) || level != I915_CACHE_NONE;
53 }
54 
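/* CPU writes must be flushed with clflush when the CPU cache is not coherent
 * with the GPU for this object, or when the object is pinned for scanout
 * (the display engine does not snoop the CPU cache).
 */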
55 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
56 {
57 	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
58 		return true;
59 
60 	return obj->pin_display;
61 }
62 
63 /* some bookkeeping */
64 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
65 				  size_t size)
66 {
67 	spin_lock(&dev_priv->mm.object_stat_lock);
68 	dev_priv->mm.object_count++;
69 	dev_priv->mm.object_memory += size;
70 	spin_unlock(&dev_priv->mm.object_stat_lock);
71 }
72 
73 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
74 				     size_t size)
75 {
76 	spin_lock(&dev_priv->mm.object_stat_lock);
77 	dev_priv->mm.object_count--;
78 	dev_priv->mm.object_memory -= size;
79 	spin_unlock(&dev_priv->mm.object_stat_lock);
80 }
81 
82 static int
83 i915_gem_wait_for_error(struct i915_gpu_error *error)
84 {
85 	int ret;
86 
87 	if (!i915_reset_in_progress(error))
88 		return 0;
89 
90 	/*
91 	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
92 	 * userspace. If it takes that long, something really bad is going on and
93 	 * we should simply try to bail out and fail as gracefully as possible.
94 	 */
95 	ret = wait_event_interruptible_timeout(error->reset_queue,
96 					       !i915_reset_in_progress(error),
97 					       10*HZ);
98 	if (ret == 0) {
99 		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
100 		return -EIO;
101 	} else if (ret < 0) {
102 		return ret;
103 	} else {
104 		return 0;
105 	}
106 }
107 
108 int i915_mutex_lock_interruptible(struct drm_device *dev)
109 {
110 	struct drm_i915_private *dev_priv = dev->dev_private;
111 	int ret;
112 
113 	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
114 	if (ret)
115 		return ret;
116 
117 	ret = mutex_lock_interruptible(&dev->struct_mutex);
118 	if (ret)
119 		return ret;
120 
121 	WARN_ON(i915_verify_lists(dev));
122 	return 0;
123 }
124 
125 int
126 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
127 			    struct drm_file *file)
128 {
129 	struct drm_i915_private *dev_priv = to_i915(dev);
130 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
131 	struct drm_i915_gem_get_aperture *args = data;
132 	struct i915_vma *vma;
133 	size_t pinned;
134 
135 	pinned = 0;
136 	mutex_lock(&dev->struct_mutex);
137 	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
138 		if (vma->pin_count)
139 			pinned += vma->node.size;
140 	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
141 		if (vma->pin_count)
142 			pinned += vma->node.size;
143 	mutex_unlock(&dev->struct_mutex);
144 
145 	args->aper_size = ggtt->base.total;
146 	args->aper_available_size = args->aper_size - pinned;
147 
148 	return 0;
149 }
150 
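/* Copy the shmem backing store into the contiguous allocation backing
 * obj->phys_handle, then build a single-entry sg_table whose DMA address
 * points at that allocation.
 */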
151 static int
152 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
153 {
154 	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
155 	char *vaddr = obj->phys_handle->vaddr;
156 	struct sg_table *st;
157 	struct scatterlist *sg;
158 	int i;
159 
160 	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
161 		return -EINVAL;
162 
163 	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
164 		struct page *page;
165 		char *src;
166 
167 		page = shmem_read_mapping_page(mapping, i);
168 		if (IS_ERR(page))
169 			return PTR_ERR(page);
170 
171 		src = kmap_atomic(page);
172 		memcpy(vaddr, src, PAGE_SIZE);
173 		drm_clflush_virt_range(vaddr, PAGE_SIZE);
174 		kunmap_atomic(src);
175 
176 		put_page(page);
177 		vaddr += PAGE_SIZE;
178 	}
179 
180 	i915_gem_chipset_flush(obj->base.dev);
181 
182 	st = kmalloc(sizeof(*st), GFP_KERNEL);
183 	if (st == NULL)
184 		return -ENOMEM;
185 
186 	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
187 		kfree(st);
188 		return -ENOMEM;
189 	}
190 
191 	sg = st->sgl;
192 	sg->offset = 0;
193 	sg->length = obj->base.size;
194 
195 	sg_dma_address(sg) = obj->phys_handle->busaddr;
196 	sg_dma_len(sg) = obj->base.size;
197 
198 	obj->pages = st;
199 	return 0;
200 }
201 
202 static void
203 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
204 {
205 	int ret;
206 
207 	BUG_ON(obj->madv == __I915_MADV_PURGED);
208 
209 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
210 	if (WARN_ON(ret)) {
211 		/* In the event of a disaster, abandon all caches and
212 		 * hope for the best.
213 		 */
214 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
215 	}
216 
217 	if (obj->madv == I915_MADV_DONTNEED)
218 		obj->dirty = 0;
219 
220 	if (obj->dirty) {
221 		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
222 		char *vaddr = obj->phys_handle->vaddr;
223 		int i;
224 
225 		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
226 			struct page *page;
227 			char *dst;
228 
229 			page = shmem_read_mapping_page(mapping, i);
230 			if (IS_ERR(page))
231 				continue;
232 
233 			dst = kmap_atomic(page);
234 			drm_clflush_virt_range(vaddr, PAGE_SIZE);
235 			memcpy(dst, vaddr, PAGE_SIZE);
236 			kunmap_atomic(dst);
237 
238 			set_page_dirty(page);
239 			if (obj->madv == I915_MADV_WILLNEED)
240 				mark_page_accessed(page);
241 			put_page(page);
242 			vaddr += PAGE_SIZE;
243 		}
244 		obj->dirty = 0;
245 	}
246 
247 	sg_free_table(obj->pages);
248 	kfree(obj->pages);
249 }
250 
251 static void
252 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
253 {
254 	drm_pci_free(obj->base.dev, obj->phys_handle);
255 }
256 
257 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
258 	.get_pages = i915_gem_object_get_pages_phys,
259 	.put_pages = i915_gem_object_put_pages_phys,
260 	.release = i915_gem_object_release_phys,
261 };
262 
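/* Unbind the object's VMAs (stopping at the first failure) and then release
 * its backing pages, holding a temporary reference so the object cannot be
 * freed while we are still using it.
 */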
263 static int
264 drop_pages(struct drm_i915_gem_object *obj)
265 {
266 	struct i915_vma *vma, *next;
267 	int ret;
268 
269 	drm_gem_object_reference(&obj->base);
270 	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
271 		if (i915_vma_unbind(vma))
272 			break;
273 
274 	ret = i915_gem_object_put_pages(obj);
275 	drm_gem_object_unreference(&obj->base);
276 
277 	return ret;
278 }
279 
280 int
281 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
282 			    int align)
283 {
284 	drm_dma_handle_t *phys;
285 	int ret;
286 
287 	if (obj->phys_handle) {
288 		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
289 			return -EBUSY;
290 
291 		return 0;
292 	}
293 
294 	if (obj->madv != I915_MADV_WILLNEED)
295 		return -EFAULT;
296 
297 	if (obj->base.filp == NULL)
298 		return -EINVAL;
299 
300 	ret = drop_pages(obj);
301 	if (ret)
302 		return ret;
303 
304 	/* create a new object */
305 	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
306 	if (!phys)
307 		return -ENOMEM;
308 
309 	obj->phys_handle = phys;
310 	obj->ops = &i915_gem_phys_ops;
311 
312 	return i915_gem_object_get_pages(obj);
313 }
314 
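/* pwrite path for objects that have been attached to a contiguous physical
 * allocation: copy the user data straight into the CPU mapping of
 * obj->phys_handle and flush it out to memory.
 */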
315 static int
316 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
317 		     struct drm_i915_gem_pwrite *args,
318 		     struct drm_file *file_priv)
319 {
320 	struct drm_device *dev = obj->base.dev;
321 	void *vaddr = obj->phys_handle->vaddr + args->offset;
322 	char __user *user_data = u64_to_user_ptr(args->data_ptr);
323 	int ret = 0;
324 
325 	/* We manually control the domain here and pretend that it
326 	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
327 	 */
328 	ret = i915_gem_object_wait_rendering(obj, false);
329 	if (ret)
330 		return ret;
331 
332 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
333 	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
334 		unsigned long unwritten;
335 
336 		/* The physical object once assigned is fixed for the lifetime
337 		 * of the obj, so we can safely drop the lock and continue
338 		 * to access vaddr.
339 		 */
340 		mutex_unlock(&dev->struct_mutex);
341 		unwritten = copy_from_user(vaddr, user_data, args->size);
342 		mutex_lock(&dev->struct_mutex);
343 		if (unwritten) {
344 			ret = -EFAULT;
345 			goto out;
346 		}
347 	}
348 
349 	drm_clflush_virt_range(vaddr, args->size);
350 	i915_gem_chipset_flush(dev);
351 
352 out:
353 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
354 	return ret;
355 }
356 
357 void *i915_gem_object_alloc(struct drm_device *dev)
358 {
359 	struct drm_i915_private *dev_priv = dev->dev_private;
360 	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
361 }
362 
363 void i915_gem_object_free(struct drm_i915_gem_object *obj)
364 {
365 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
366 	kmem_cache_free(dev_priv->objects, obj);
367 }
368 
369 static int
370 i915_gem_create(struct drm_file *file,
371 		struct drm_device *dev,
372 		uint64_t size,
373 		uint32_t *handle_p)
374 {
375 	struct drm_i915_gem_object *obj;
376 	int ret;
377 	u32 handle;
378 
379 	size = roundup(size, PAGE_SIZE);
380 	if (size == 0)
381 		return -EINVAL;
382 
383 	/* Allocate the new object */
384 	obj = i915_gem_alloc_object(dev, size);
385 	if (obj == NULL)
386 		return -ENOMEM;
387 
388 	ret = drm_gem_handle_create(file, &obj->base, &handle);
389 	/* drop reference from allocate - handle holds it now */
390 	drm_gem_object_unreference_unlocked(&obj->base);
391 	if (ret)
392 		return ret;
393 
394 	*handle_p = handle;
395 	return 0;
396 }
397 
398 int
399 i915_gem_dumb_create(struct drm_file *file,
400 		     struct drm_device *dev,
401 		     struct drm_mode_create_dumb *args)
402 {
403 	/* have to work out size/pitch and return them */
404 	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
405 	args->size = args->pitch * args->height;
406 	return i915_gem_create(file, dev,
407 			       args->size, &args->handle);
408 }
409 
410 /**
411  * Creates a new mm object and returns a handle to it.
412  */
413 int
414 i915_gem_create_ioctl(struct drm_device *dev, void *data,
415 		      struct drm_file *file)
416 {
417 	struct drm_i915_gem_create *args = data;
418 
419 	return i915_gem_create(file, dev,
420 			       args->size, &args->handle);
421 }
422 
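/* Helpers for pread/pwrite on bit-17-swizzled objects: copy one 64-byte
 * cacheline at a time, toggling bit 6 of the GPU offset so that each
 * cacheline is accessed where the swizzle actually placed it.
 */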
423 static inline int
424 __copy_to_user_swizzled(char __user *cpu_vaddr,
425 			const char *gpu_vaddr, int gpu_offset,
426 			int length)
427 {
428 	int ret, cpu_offset = 0;
429 
430 	while (length > 0) {
431 		int cacheline_end = ALIGN(gpu_offset + 1, 64);
432 		int this_length = min(cacheline_end - gpu_offset, length);
433 		int swizzled_gpu_offset = gpu_offset ^ 64;
434 
435 		ret = __copy_to_user(cpu_vaddr + cpu_offset,
436 				     gpu_vaddr + swizzled_gpu_offset,
437 				     this_length);
438 		if (ret)
439 			return ret + length;
440 
441 		cpu_offset += this_length;
442 		gpu_offset += this_length;
443 		length -= this_length;
444 	}
445 
446 	return 0;
447 }
448 
449 static inline int
450 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
451 			  const char __user *cpu_vaddr,
452 			  int length)
453 {
454 	int ret, cpu_offset = 0;
455 
456 	while (length > 0) {
457 		int cacheline_end = ALIGN(gpu_offset + 1, 64);
458 		int this_length = min(cacheline_end - gpu_offset, length);
459 		int swizzled_gpu_offset = gpu_offset ^ 64;
460 
461 		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
462 				       cpu_vaddr + cpu_offset,
463 				       this_length);
464 		if (ret)
465 			return ret + length;
466 
467 		cpu_offset += this_length;
468 		gpu_offset += this_length;
469 		length -= this_length;
470 	}
471 
472 	return 0;
473 }
474 
475 /*
476  * Pins the specified object's pages and synchronizes the object with
477  * GPU accesses. Sets needs_clflush to non-zero if the caller should
478  * flush the object from the CPU cache.
479  */
480 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
481 				    int *needs_clflush)
482 {
483 	int ret;
484 
485 	*needs_clflush = 0;
486 
487 	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
488 		return -EINVAL;
489 
490 	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
491 		/* If we're not in the cpu read domain, set ourselves into the gtt
492 		 * read domain and manually flush cachelines (if required). This
493 		 * optimizes for the case when the gpu will dirty the data
494 		 * anyway again before the next pread happens. */
495 		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
496 							obj->cache_level);
497 		ret = i915_gem_object_wait_rendering(obj, true);
498 		if (ret)
499 			return ret;
500 	}
501 
502 	ret = i915_gem_object_get_pages(obj);
503 	if (ret)
504 		return ret;
505 
506 	i915_gem_object_pin_pages(obj);
507 
508 	return ret;
509 }
510 
511 /* Per-page copy function for the shmem pread fastpath.
512  * Flushes invalid cachelines before reading the target if
513  * needs_clflush is set. */
514 static int
515 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
516 		 char __user *user_data,
517 		 bool page_do_bit17_swizzling, bool needs_clflush)
518 {
519 	char *vaddr;
520 	int ret;
521 
522 	if (unlikely(page_do_bit17_swizzling))
523 		return -EINVAL;
524 
525 	vaddr = kmap_atomic(page);
526 	if (needs_clflush)
527 		drm_clflush_virt_range(vaddr + shmem_page_offset,
528 				       page_length);
529 	ret = __copy_to_user_inatomic(user_data,
530 				      vaddr + shmem_page_offset,
531 				      page_length);
532 	kunmap_atomic(vaddr);
533 
534 	return ret ? -EFAULT : 0;
535 }
536 
537 static void
538 shmem_clflush_swizzled_range(char *addr, unsigned long length,
539 			     bool swizzled)
540 {
541 	if (unlikely(swizzled)) {
542 		unsigned long start = (unsigned long) addr;
543 		unsigned long end = (unsigned long) addr + length;
544 
545 		/* For swizzling simply ensure that we always flush both
546 		 * channels. Lame, but simple and it works. Swizzled
547 		 * pwrite/pread is far from a hotpath - current userspace
548 		 * doesn't use it at all. */
549 		start = round_down(start, 128);
550 		end = round_up(end, 128);
551 
552 		drm_clflush_virt_range((void *)start, end - start);
553 	} else {
554 		drm_clflush_virt_range(addr, length);
555 	}
556 
557 }
558 
559 /* Only difference to the fast-path function is that this can handle bit17
560  * and uses non-atomic copy and kmap functions. */
561 static int
562 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
563 		 char __user *user_data,
564 		 bool page_do_bit17_swizzling, bool needs_clflush)
565 {
566 	char *vaddr;
567 	int ret;
568 
569 	vaddr = kmap(page);
570 	if (needs_clflush)
571 		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
572 					     page_length,
573 					     page_do_bit17_swizzling);
574 
575 	if (page_do_bit17_swizzling)
576 		ret = __copy_to_user_swizzled(user_data,
577 					      vaddr, shmem_page_offset,
578 					      page_length);
579 	else
580 		ret = __copy_to_user(user_data,
581 				     vaddr + shmem_page_offset,
582 				     page_length);
583 	kunmap(page);
584 
585 	return ret ? -EFAULT : 0;
586 }
587 
588 static int
589 i915_gem_shmem_pread(struct drm_device *dev,
590 		     struct drm_i915_gem_object *obj,
591 		     struct drm_i915_gem_pread *args,
592 		     struct drm_file *file)
593 {
594 	char __user *user_data;
595 	ssize_t remain;
596 	loff_t offset;
597 	int shmem_page_offset, page_length, ret = 0;
598 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
599 	int prefaulted = 0;
600 	int needs_clflush = 0;
601 	struct sg_page_iter sg_iter;
602 
603 	user_data = u64_to_user_ptr(args->data_ptr);
604 	remain = args->size;
605 
606 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
607 
608 	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
609 	if (ret)
610 		return ret;
611 
612 	offset = args->offset;
613 
614 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
615 			 offset >> PAGE_SHIFT) {
616 		struct page *page = sg_page_iter_page(&sg_iter);
617 
618 		if (remain <= 0)
619 			break;
620 
621 		/* Operation in this page
622 		 *
623 		 * shmem_page_offset = offset within page in shmem file
624 		 * page_length = bytes to copy for this page
625 		 */
626 		shmem_page_offset = offset_in_page(offset);
627 		page_length = remain;
628 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
629 			page_length = PAGE_SIZE - shmem_page_offset;
630 
631 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
632 			(page_to_phys(page) & (1 << 17)) != 0;
633 
634 		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
635 				       user_data, page_do_bit17_swizzling,
636 				       needs_clflush);
637 		if (ret == 0)
638 			goto next_page;
639 
640 		mutex_unlock(&dev->struct_mutex);
641 
642 		if (likely(!i915.prefault_disable) && !prefaulted) {
643 			ret = fault_in_multipages_writeable(user_data, remain);
644 			/* Userspace is tricking us, but we've already clobbered
645 			 * its pages with the prefault and promised to write the
646 			 * data up to the first fault. Hence ignore any errors
647 			 * and just continue. */
648 			(void)ret;
649 			prefaulted = 1;
650 		}
651 
652 		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
653 				       user_data, page_do_bit17_swizzling,
654 				       needs_clflush);
655 
656 		mutex_lock(&dev->struct_mutex);
657 
658 		if (ret)
659 			goto out;
660 
661 next_page:
662 		remain -= page_length;
663 		user_data += page_length;
664 		offset += page_length;
665 	}
666 
667 out:
668 	i915_gem_object_unpin_pages(obj);
669 
670 	return ret;
671 }
672 
673 /**
674  * Reads data from the object referenced by handle.
675  *
676  * On error, the contents of *data are undefined.
677  */
678 int
679 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
680 		     struct drm_file *file)
681 {
682 	struct drm_i915_gem_pread *args = data;
683 	struct drm_i915_gem_object *obj;
684 	int ret = 0;
685 
686 	if (args->size == 0)
687 		return 0;
688 
689 	if (!access_ok(VERIFY_WRITE,
690 		       u64_to_user_ptr(args->data_ptr),
691 		       args->size))
692 		return -EFAULT;
693 
694 	ret = i915_mutex_lock_interruptible(dev);
695 	if (ret)
696 		return ret;
697 
698 	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
699 	if (&obj->base == NULL) {
700 		ret = -ENOENT;
701 		goto unlock;
702 	}
703 
704 	/* Bounds check source.  */
705 	if (args->offset > obj->base.size ||
706 	    args->size > obj->base.size - args->offset) {
707 		ret = -EINVAL;
708 		goto out;
709 	}
710 
711 	/* prime objects have no backing filp to GEM pread/pwrite
712 	 * pages from.
713 	 */
714 	if (!obj->base.filp) {
715 		ret = -EINVAL;
716 		goto out;
717 	}
718 
719 	trace_i915_gem_object_pread(obj, args->offset, args->size);
720 
721 	ret = i915_gem_shmem_pread(dev, obj, args, file);
722 
723 out:
724 	drm_gem_object_unreference(&obj->base);
725 unlock:
726 	mutex_unlock(&dev->struct_mutex);
727 	return ret;
728 }
729 
730 /* This is the fast write path, which cannot handle
731  * page faults in the source data.
732  */
733 
734 static inline int
735 fast_user_write(struct io_mapping *mapping,
736 		loff_t page_base, int page_offset,
737 		char __user *user_data,
738 		int length)
739 {
740 	void __iomem *vaddr_atomic;
741 	void *vaddr;
742 	unsigned long unwritten;
743 
744 	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
745 	/* We can use the cpu mem copy function because this is X86. */
746 	vaddr = (void __force*)vaddr_atomic + page_offset;
747 	unwritten = __copy_from_user_inatomic_nocache(vaddr,
748 						      user_data, length);
749 	io_mapping_unmap_atomic(vaddr_atomic);
750 	return unwritten;
751 }
752 
753 /**
754  * This is the fast pwrite path, where we copy the data directly from the
755  * user into the GTT, uncached.
756  */
757 static int
758 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
759 			 struct drm_i915_gem_object *obj,
760 			 struct drm_i915_gem_pwrite *args,
761 			 struct drm_file *file)
762 {
763 	struct drm_i915_private *dev_priv = to_i915(dev);
764 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
765 	ssize_t remain;
766 	loff_t offset, page_base;
767 	char __user *user_data;
768 	int page_offset, page_length, ret;
769 
770 	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
771 	if (ret)
772 		goto out;
773 
774 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
775 	if (ret)
776 		goto out_unpin;
777 
778 	ret = i915_gem_object_put_fence(obj);
779 	if (ret)
780 		goto out_unpin;
781 
782 	user_data = u64_to_user_ptr(args->data_ptr);
783 	remain = args->size;
784 
785 	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
786 
787 	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
788 
789 	while (remain > 0) {
790 		/* Operation in this page
791 		 *
792 		 * page_base = page offset within aperture
793 		 * page_offset = offset within page
794 		 * page_length = bytes to copy for this page
795 		 */
796 		page_base = offset & PAGE_MASK;
797 		page_offset = offset_in_page(offset);
798 		page_length = remain;
799 		if ((page_offset + remain) > PAGE_SIZE)
800 			page_length = PAGE_SIZE - page_offset;
801 
802 		/* If we get a fault while copying data, then (presumably) our
803 		 * source page isn't available.  Return the error and we'll
804 		 * retry in the slow path.
805 		 */
806 		if (fast_user_write(ggtt->mappable, page_base,
807 				    page_offset, user_data, page_length)) {
808 			ret = -EFAULT;
809 			goto out_flush;
810 		}
811 
812 		remain -= page_length;
813 		user_data += page_length;
814 		offset += page_length;
815 	}
816 
817 out_flush:
818 	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
819 out_unpin:
820 	i915_gem_object_ggtt_unpin(obj);
821 out:
822 	return ret;
823 }
824 
825 /* Per-page copy function for the shmem pwrite fastpath.
826  * Flushes invalid cachelines before writing to the target if
827  * needs_clflush_before is set and flushes out any written cachelines after
828  * writing if needs_clflush is set. */
829 static int
830 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
831 		  char __user *user_data,
832 		  bool page_do_bit17_swizzling,
833 		  bool needs_clflush_before,
834 		  bool needs_clflush_after)
835 {
836 	char *vaddr;
837 	int ret;
838 
839 	if (unlikely(page_do_bit17_swizzling))
840 		return -EINVAL;
841 
842 	vaddr = kmap_atomic(page);
843 	if (needs_clflush_before)
844 		drm_clflush_virt_range(vaddr + shmem_page_offset,
845 				       page_length);
846 	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
847 					user_data, page_length);
848 	if (needs_clflush_after)
849 		drm_clflush_virt_range(vaddr + shmem_page_offset,
850 				       page_length);
851 	kunmap_atomic(vaddr);
852 
853 	return ret ? -EFAULT : 0;
854 }
855 
856 /* Only difference to the fast-path function is that this can handle bit17
857  * and uses non-atomic copy and kmap functions. */
858 static int
859 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
860 		  char __user *user_data,
861 		  bool page_do_bit17_swizzling,
862 		  bool needs_clflush_before,
863 		  bool needs_clflush_after)
864 {
865 	char *vaddr;
866 	int ret;
867 
868 	vaddr = kmap(page);
869 	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
870 		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
871 					     page_length,
872 					     page_do_bit17_swizzling);
873 	if (page_do_bit17_swizzling)
874 		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
875 						user_data,
876 						page_length);
877 	else
878 		ret = __copy_from_user(vaddr + shmem_page_offset,
879 				       user_data,
880 				       page_length);
881 	if (needs_clflush_after)
882 		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
883 					     page_length,
884 					     page_do_bit17_swizzling);
885 	kunmap(page);
886 
887 	return ret ? -EFAULT : 0;
888 }
889 
890 static int
891 i915_gem_shmem_pwrite(struct drm_device *dev,
892 		      struct drm_i915_gem_object *obj,
893 		      struct drm_i915_gem_pwrite *args,
894 		      struct drm_file *file)
895 {
896 	ssize_t remain;
897 	loff_t offset;
898 	char __user *user_data;
899 	int shmem_page_offset, page_length, ret = 0;
900 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
901 	int hit_slowpath = 0;
902 	int needs_clflush_after = 0;
903 	int needs_clflush_before = 0;
904 	struct sg_page_iter sg_iter;
905 
906 	user_data = u64_to_user_ptr(args->data_ptr);
907 	remain = args->size;
908 
909 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
910 
911 	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
912 		/* If we're not in the cpu write domain, set ourselves into the gtt
913 		 * write domain and manually flush cachelines (if required). This
914 		 * optimizes for the case when the gpu will use the data
915 		 * right away and we therefore have to clflush anyway. */
916 		needs_clflush_after = cpu_write_needs_clflush(obj);
917 		ret = i915_gem_object_wait_rendering(obj, false);
918 		if (ret)
919 			return ret;
920 	}
921 	/* Same trick applies to invalidate partially written cachelines read
922 	 * before writing. */
923 	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
924 		needs_clflush_before =
925 			!cpu_cache_is_coherent(dev, obj->cache_level);
926 
927 	ret = i915_gem_object_get_pages(obj);
928 	if (ret)
929 		return ret;
930 
931 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
932 
933 	i915_gem_object_pin_pages(obj);
934 
935 	offset = args->offset;
936 	obj->dirty = 1;
937 
938 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
939 			 offset >> PAGE_SHIFT) {
940 		struct page *page = sg_page_iter_page(&sg_iter);
941 		int partial_cacheline_write;
942 
943 		if (remain <= 0)
944 			break;
945 
946 		/* Operation in this page
947 		 *
948 		 * shmem_page_offset = offset within page in shmem file
949 		 * page_length = bytes to copy for this page
950 		 */
951 		shmem_page_offset = offset_in_page(offset);
952 
953 		page_length = remain;
954 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
955 			page_length = PAGE_SIZE - shmem_page_offset;
956 
957 		/* If we don't overwrite a cacheline completely we need to be
958 		 * careful to have up-to-date data by first clflushing. Don't
959 		 * overcomplicate things and flush the entire range being written. */
960 		partial_cacheline_write = needs_clflush_before &&
961 			((shmem_page_offset | page_length)
962 				& (boot_cpu_data.x86_clflush_size - 1));
963 
964 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
965 			(page_to_phys(page) & (1 << 17)) != 0;
966 
967 		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
968 					user_data, page_do_bit17_swizzling,
969 					partial_cacheline_write,
970 					needs_clflush_after);
971 		if (ret == 0)
972 			goto next_page;
973 
974 		hit_slowpath = 1;
975 		mutex_unlock(&dev->struct_mutex);
976 		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
977 					user_data, page_do_bit17_swizzling,
978 					partial_cacheline_write,
979 					needs_clflush_after);
980 
981 		mutex_lock(&dev->struct_mutex);
982 
983 		if (ret)
984 			goto out;
985 
986 next_page:
987 		remain -= page_length;
988 		user_data += page_length;
989 		offset += page_length;
990 	}
991 
992 out:
993 	i915_gem_object_unpin_pages(obj);
994 
995 	if (hit_slowpath) {
996 		/*
997 		 * Fixup: Flush cpu caches in case we didn't flush the dirty
998 		 * cachelines in-line while writing and the object moved
999 		 * out of the cpu write domain while we've dropped the lock.
1000 		 */
1001 		if (!needs_clflush_after &&
1002 		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1003 			if (i915_gem_clflush_object(obj, obj->pin_display))
1004 				needs_clflush_after = true;
1005 		}
1006 	}
1007 
1008 	if (needs_clflush_after)
1009 		i915_gem_chipset_flush(dev);
1010 	else
1011 		obj->cache_dirty = true;
1012 
1013 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1014 	return ret;
1015 }
1016 
1017 /**
1018  * Writes data to the object referenced by handle.
1019  *
1020  * On error, the contents of the buffer that were to be modified are undefined.
1021  */
1022 int
1023 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1024 		      struct drm_file *file)
1025 {
1026 	struct drm_i915_private *dev_priv = dev->dev_private;
1027 	struct drm_i915_gem_pwrite *args = data;
1028 	struct drm_i915_gem_object *obj;
1029 	int ret;
1030 
1031 	if (args->size == 0)
1032 		return 0;
1033 
1034 	if (!access_ok(VERIFY_READ,
1035 		       u64_to_user_ptr(args->data_ptr),
1036 		       args->size))
1037 		return -EFAULT;
1038 
1039 	if (likely(!i915.prefault_disable)) {
1040 		ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
1041 						   args->size);
1042 		if (ret)
1043 			return -EFAULT;
1044 	}
1045 
1046 	intel_runtime_pm_get(dev_priv);
1047 
1048 	ret = i915_mutex_lock_interruptible(dev);
1049 	if (ret)
1050 		goto put_rpm;
1051 
1052 	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
1053 	if (&obj->base == NULL) {
1054 		ret = -ENOENT;
1055 		goto unlock;
1056 	}
1057 
1058 	/* Bounds check destination. */
1059 	if (args->offset > obj->base.size ||
1060 	    args->size > obj->base.size - args->offset) {
1061 		ret = -EINVAL;
1062 		goto out;
1063 	}
1064 
1065 	/* prime objects have no backing filp to GEM pread/pwrite
1066 	 * pages from.
1067 	 */
1068 	if (!obj->base.filp) {
1069 		ret = -EINVAL;
1070 		goto out;
1071 	}
1072 
1073 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1074 
1075 	ret = -EFAULT;
1076 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
1077 	 * it would end up going through the fenced access, and we'll get
1078 	 * different detiling behavior between reading and writing.
1079 	 * pread/pwrite currently are reading and writing from the CPU
1080 	 * perspective, requiring manual detiling by the client.
1081 	 */
1082 	if (obj->tiling_mode == I915_TILING_NONE &&
1083 	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
1084 	    cpu_write_needs_clflush(obj)) {
1085 		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1086 		/* Note that the gtt paths might fail with non-page-backed user
1087 		 * pointers (e.g. gtt mappings when moving data between
1088 		 * textures). Fall back to the shmem path in that case. */
1089 	}
1090 
1091 	if (ret == -EFAULT || ret == -ENOSPC) {
1092 		if (obj->phys_handle)
1093 			ret = i915_gem_phys_pwrite(obj, args, file);
1094 		else
1095 			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1096 	}
1097 
1098 out:
1099 	drm_gem_object_unreference(&obj->base);
1100 unlock:
1101 	mutex_unlock(&dev->struct_mutex);
1102 put_rpm:
1103 	intel_runtime_pm_put(dev_priv);
1104 
1105 	return ret;
1106 }
1107 
1108 static int
1109 i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
1110 {
1111 	if (__i915_terminally_wedged(reset_counter))
1112 		return -EIO;
1113 
1114 	if (__i915_reset_in_progress(reset_counter)) {
1115 		/* Non-interruptible callers can't handle -EAGAIN, hence return
1116 		 * -EIO unconditionally for these. */
1117 		if (!interruptible)
1118 			return -EIO;
1119 
1120 		return -EAGAIN;
1121 	}
1122 
1123 	return 0;
1124 }
1125 
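/* fake_irq() is a timer callback that wakes the sleeping waiter so request
 * completion is re-checked even if the expected interrupt never arrives;
 * missed_irq() reports whether this engine has been flagged as missing
 * interrupts.
 */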
1126 static void fake_irq(unsigned long data)
1127 {
1128 	wake_up_process((struct task_struct *)data);
1129 }
1130 
1131 static bool missed_irq(struct drm_i915_private *dev_priv,
1132 		       struct intel_engine_cs *engine)
1133 {
1134 	return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
1135 }
1136 
1137 static unsigned long local_clock_us(unsigned *cpu)
1138 {
1139 	unsigned long t;
1140 
1141 	/* Cheaply and approximately convert from nanoseconds to microseconds.
1142 	 * The result and subsequent calculations are also defined in the same
1143 	 * approximate microseconds units. The principal source of timing
1144 	 * error here is from the simple truncation.
1145 	 *
1146 	 * Note that local_clock() is only defined with respect to the current CPU;
1147 	 * the comparisons are no longer valid if we switch CPUs. Instead of
1148 	 * blocking preemption for the entire busywait, we can detect the CPU
1149 	 * switch and use that as an indicator of system load and a reason to
1150 	 * stop busywaiting, see busywait_stop().
1151 	 */
1152 	*cpu = get_cpu();
1153 	t = local_clock() >> 10;
1154 	put_cpu();
1155 
1156 	return t;
1157 }
1158 
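/* Give up busy-waiting once the (approximate) timeout has expired or we have
 * been moved to a different CPU, since local_clock() values from different
 * CPUs cannot be compared.
 */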
1159 static bool busywait_stop(unsigned long timeout, unsigned cpu)
1160 {
1161 	unsigned this_cpu;
1162 
1163 	if (time_after(local_clock_us(&this_cpu), timeout))
1164 		return true;
1165 
1166 	return this_cpu != cpu;
1167 }
1168 
1169 static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
1170 {
1171 	unsigned long timeout;
1172 	unsigned cpu;
1173 
1174 	/* When waiting for high frequency requests, e.g. during synchronous
1175 	 * rendering split between the CPU and GPU, the finite amount of time
1176 	 * required to set up the irq and wait upon it limits the response
1177 	 * rate. By busywaiting on the request completion for a short while we
1178 	 * can service the high frequency waits as quickly as possible. However,
1179 	 * if it is a slow request, we want to sleep as quickly as possible.
1180 	 * The tradeoff between waiting and sleeping is roughly the time it
1181 	 * takes to sleep on a request, on the order of a microsecond.
1182 	 */
1183 
1184 	if (req->engine->irq_refcount)
1185 		return -EBUSY;
1186 
1187 	/* Only spin if we know the GPU is processing this request */
1188 	if (!i915_gem_request_started(req, true))
1189 		return -EAGAIN;
1190 
1191 	timeout = local_clock_us(&cpu) + 5;
1192 	while (!need_resched()) {
1193 		if (i915_gem_request_completed(req, true))
1194 			return 0;
1195 
1196 		if (signal_pending_state(state, current))
1197 			break;
1198 
1199 		if (busywait_stop(timeout, cpu))
1200 			break;
1201 
1202 		cpu_relax_lowlatency();
1203 	}
1204 
1205 	if (i915_gem_request_completed(req, false))
1206 		return 0;
1207 
1208 	return -EAGAIN;
1209 }
1210 
1211 /**
1212  * __i915_wait_request - wait until execution of request has finished
1213  * @req: request to wait upon
1214  * @interruptible: do an interruptible wait (normally yes)
1215  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1216  *
1217  * Note: It is of utmost importance that the passed-in seqno and reset_counter
1218  * values have been read by the caller in an SMP-safe manner. Where read-side
1219  * locks are involved, it is sufficient to read the reset_counter before
1220  * unlocking the lock that protects the seqno. For lockless tricks, the
1221  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1222  * inserted.
1223  *
1224  * Returns 0 if the request completed within the allotted time. Else returns the
1225  * errno with the remaining time filled in the timeout argument.
1226  */
1227 int __i915_wait_request(struct drm_i915_gem_request *req,
1228 			bool interruptible,
1229 			s64 *timeout,
1230 			struct intel_rps_client *rps)
1231 {
1232 	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
1233 	struct drm_device *dev = engine->dev;
1234 	struct drm_i915_private *dev_priv = dev->dev_private;
1235 	const bool irq_test_in_progress =
1236 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
1237 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1238 	DEFINE_WAIT(wait);
1239 	unsigned long timeout_expire;
1240 	s64 before = 0; /* Only to silence a compiler warning. */
1241 	int ret;
1242 
1243 	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
1244 
1245 	if (list_empty(&req->list))
1246 		return 0;
1247 
1248 	if (i915_gem_request_completed(req, true))
1249 		return 0;
1250 
1251 	timeout_expire = 0;
1252 	if (timeout) {
1253 		if (WARN_ON(*timeout < 0))
1254 			return -EINVAL;
1255 
1256 		if (*timeout == 0)
1257 			return -ETIME;
1258 
1259 		timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
1260 
1261 		/*
1262 		 * Record current time in case interrupted by signal, or wedged.
1263 		 */
1264 		before = ktime_get_raw_ns();
1265 	}
1266 
1267 	if (INTEL_INFO(dev_priv)->gen >= 6)
1268 		gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
1269 
1270 	trace_i915_gem_request_wait_begin(req);
1271 
1272 	/* Optimistic spin for the next jiffie before touching IRQs */
1273 	ret = __i915_spin_request(req, state);
1274 	if (ret == 0)
1275 		goto out;
1276 
1277 	if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
1278 		ret = -ENODEV;
1279 		goto out;
1280 	}
1281 
1282 	for (;;) {
1283 		struct timer_list timer;
1284 
1285 		prepare_to_wait(&engine->irq_queue, &wait, state);
1286 
1287 		/* We need to check whether any gpu reset happened in between
1288 		 * the request being submitted and now. If a reset has occurred,
1289 		 * the request is effectively complete (we are either in the
1290 		 * process of discarding the rendering, or have already done so
1291 		 * and completely reset the GPU). The results are lost and we
1292 		 * are free to continue on with the original operation.
1293 		 */
1294 		if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
1295 			ret = 0;
1296 			break;
1297 		}
1298 
1299 		if (i915_gem_request_completed(req, false)) {
1300 			ret = 0;
1301 			break;
1302 		}
1303 
1304 		if (signal_pending_state(state, current)) {
1305 			ret = -ERESTARTSYS;
1306 			break;
1307 		}
1308 
1309 		if (timeout && time_after_eq(jiffies, timeout_expire)) {
1310 			ret = -ETIME;
1311 			break;
1312 		}
1313 
1314 		timer.function = NULL;
1315 		if (timeout || missed_irq(dev_priv, engine)) {
1316 			unsigned long expire;
1317 
1318 			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
1319 			expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
1320 			mod_timer(&timer, expire);
1321 		}
1322 
1323 		io_schedule();
1324 
1325 		if (timer.function) {
1326 			del_singleshot_timer_sync(&timer);
1327 			destroy_timer_on_stack(&timer);
1328 		}
1329 	}
1330 	if (!irq_test_in_progress)
1331 		engine->irq_put(engine);
1332 
1333 	finish_wait(&engine->irq_queue, &wait);
1334 
1335 out:
1336 	trace_i915_gem_request_wait_end(req);
1337 
1338 	if (timeout) {
1339 		s64 tres = *timeout - (ktime_get_raw_ns() - before);
1340 
1341 		*timeout = tres < 0 ? 0 : tres;
1342 
1343 		/*
1344 		 * Apparently ktime isn't accurate enough and occasionally has a
1345 		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
1346 		 * things up to make the test happy. We allow up to 1 jiffy.
1347 		 *
1348 		 * This is a regression from the timespec->ktime conversion.
1349 		 */
1350 		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
1351 			*timeout = 0;
1352 	}
1353 
1354 	return ret;
1355 }
1356 
1357 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
1358 				   struct drm_file *file)
1359 {
1360 	struct drm_i915_file_private *file_priv;
1361 
1362 	WARN_ON(!req || !file || req->file_priv);
1363 
1364 	if (!req || !file)
1365 		return -EINVAL;
1366 
1367 	if (req->file_priv)
1368 		return -EINVAL;
1369 
1370 	file_priv = file->driver_priv;
1371 
1372 	spin_lock(&file_priv->mm.lock);
1373 	req->file_priv = file_priv;
1374 	list_add_tail(&req->client_list, &file_priv->mm.request_list);
1375 	spin_unlock(&file_priv->mm.lock);
1376 
1377 	req->pid = get_pid(task_pid(current));
1378 
1379 	return 0;
1380 }
1381 
1382 static inline void
1383 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1384 {
1385 	struct drm_i915_file_private *file_priv = request->file_priv;
1386 
1387 	if (!file_priv)
1388 		return;
1389 
1390 	spin_lock(&file_priv->mm.lock);
1391 	list_del(&request->client_list);
1392 	request->file_priv = NULL;
1393 	spin_unlock(&file_priv->mm.lock);
1394 
1395 	put_pid(request->pid);
1396 	request->pid = NULL;
1397 }
1398 
1399 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
1400 {
1401 	trace_i915_gem_request_retire(request);
1402 
1403 	/* We know the GPU must have read the request to have
1404 	 * sent us the seqno + interrupt, so use the position
1405 	 * of the tail of the request to update the last known position
1406 	 * of the GPU head.
1407 	 *
1408 	 * Note this requires that we are always called in request
1409 	 * completion order.
1410 	 */
1411 	request->ringbuf->last_retired_head = request->postfix;
1412 
1413 	list_del_init(&request->list);
1414 	i915_gem_request_remove_from_client(request);
1415 
1416 	i915_gem_request_unreference(request);
1417 }
1418 
1419 static void
1420 __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
1421 {
1422 	struct intel_engine_cs *engine = req->engine;
1423 	struct drm_i915_gem_request *tmp;
1424 
1425 	lockdep_assert_held(&engine->dev->struct_mutex);
1426 
1427 	if (list_empty(&req->list))
1428 		return;
1429 
1430 	do {
1431 		tmp = list_first_entry(&engine->request_list,
1432 				       typeof(*tmp), list);
1433 
1434 		i915_gem_request_retire(tmp);
1435 	} while (tmp != req);
1436 
1437 	WARN_ON(i915_verify_lists(engine->dev));
1438 }
1439 
1440 /**
1441  * Waits for a request to be signaled, and cleans up the
1442  * request and object lists appropriately for that event.
1443  */
1444 int
1445 i915_wait_request(struct drm_i915_gem_request *req)
1446 {
1447 	struct drm_i915_private *dev_priv = req->i915;
1448 	bool interruptible;
1449 	int ret;
1450 
1451 	interruptible = dev_priv->mm.interruptible;
1452 
1453 	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
1454 
1455 	ret = __i915_wait_request(req, interruptible, NULL, NULL);
1456 	if (ret)
1457 		return ret;
1458 
1459 	/* If the GPU hung, we want to keep the requests to find the guilty one. */
1460 	if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
1461 		__i915_gem_request_retire__upto(req);
1462 
1463 	return 0;
1464 }
1465 
1466 /**
1467  * Ensures that all rendering to the object has completed and the object is
1468  * safe to unbind from the GTT or access from the CPU.
1469  */
1470 int
1471 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1472 			       bool readonly)
1473 {
1474 	int ret, i;
1475 
1476 	if (!obj->active)
1477 		return 0;
1478 
1479 	if (readonly) {
1480 		if (obj->last_write_req != NULL) {
1481 			ret = i915_wait_request(obj->last_write_req);
1482 			if (ret)
1483 				return ret;
1484 
1485 			i = obj->last_write_req->engine->id;
1486 			if (obj->last_read_req[i] == obj->last_write_req)
1487 				i915_gem_object_retire__read(obj, i);
1488 			else
1489 				i915_gem_object_retire__write(obj);
1490 		}
1491 	} else {
1492 		for (i = 0; i < I915_NUM_ENGINES; i++) {
1493 			if (obj->last_read_req[i] == NULL)
1494 				continue;
1495 
1496 			ret = i915_wait_request(obj->last_read_req[i]);
1497 			if (ret)
1498 				return ret;
1499 
1500 			i915_gem_object_retire__read(obj, i);
1501 		}
1502 		GEM_BUG_ON(obj->active);
1503 	}
1504 
1505 	return 0;
1506 }
1507 
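/* Drop the object's tracking of @req: clear the matching last_read/last_write
 * pointer and, provided no GPU reset has occurred since the request was
 * submitted, retire all requests on the engine up to and including it.
 */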
1508 static void
1509 i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
1510 			       struct drm_i915_gem_request *req)
1511 {
1512 	int ring = req->engine->id;
1513 
1514 	if (obj->last_read_req[ring] == req)
1515 		i915_gem_object_retire__read(obj, ring);
1516 	else if (obj->last_write_req == req)
1517 		i915_gem_object_retire__write(obj);
1518 
1519 	if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
1520 		__i915_gem_request_retire__upto(req);
1521 }
1522 
1523 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1524  * as the object state may change during this call.
1525  */
1526 static __must_check int
1527 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1528 					    struct intel_rps_client *rps,
1529 					    bool readonly)
1530 {
1531 	struct drm_device *dev = obj->base.dev;
1532 	struct drm_i915_private *dev_priv = dev->dev_private;
1533 	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
1534 	int ret, i, n = 0;
1535 
1536 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1537 	BUG_ON(!dev_priv->mm.interruptible);
1538 
1539 	if (!obj->active)
1540 		return 0;
1541 
1542 	if (readonly) {
1543 		struct drm_i915_gem_request *req;
1544 
1545 		req = obj->last_write_req;
1546 		if (req == NULL)
1547 			return 0;
1548 
1549 		requests[n++] = i915_gem_request_reference(req);
1550 	} else {
1551 		for (i = 0; i < I915_NUM_ENGINES; i++) {
1552 			struct drm_i915_gem_request *req;
1553 
1554 			req = obj->last_read_req[i];
1555 			if (req == NULL)
1556 				continue;
1557 
1558 			requests[n++] = i915_gem_request_reference(req);
1559 		}
1560 	}
1561 
1562 	mutex_unlock(&dev->struct_mutex);
1563 	ret = 0;
1564 	for (i = 0; ret == 0 && i < n; i++)
1565 		ret = __i915_wait_request(requests[i], true, NULL, rps);
1566 	mutex_lock(&dev->struct_mutex);
1567 
1568 	for (i = 0; i < n; i++) {
1569 		if (ret == 0)
1570 			i915_gem_object_retire_request(obj, requests[i]);
1571 		i915_gem_request_unreference(requests[i]);
1572 	}
1573 
1574 	return ret;
1575 }
1576 
1577 static struct intel_rps_client *to_rps_client(struct drm_file *file)
1578 {
1579 	struct drm_i915_file_private *fpriv = file->driver_priv;
1580 	return &fpriv->rps;
1581 }
1582 
1583 /**
1584  * Called when user space prepares to use an object with the CPU, either
1585  * through the mmap ioctl's mapping or a GTT mapping.
1586  */
1587 int
1588 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1589 			  struct drm_file *file)
1590 {
1591 	struct drm_i915_gem_set_domain *args = data;
1592 	struct drm_i915_gem_object *obj;
1593 	uint32_t read_domains = args->read_domains;
1594 	uint32_t write_domain = args->write_domain;
1595 	int ret;
1596 
1597 	/* Only handle setting domains to types used by the CPU. */
1598 	if (write_domain & I915_GEM_GPU_DOMAINS)
1599 		return -EINVAL;
1600 
1601 	if (read_domains & I915_GEM_GPU_DOMAINS)
1602 		return -EINVAL;
1603 
1604 	/* Having something in the write domain implies it's in the read
1605 	 * domain, and only that read domain.  Enforce that in the request.
1606 	 */
1607 	if (write_domain != 0 && read_domains != write_domain)
1608 		return -EINVAL;
1609 
1610 	ret = i915_mutex_lock_interruptible(dev);
1611 	if (ret)
1612 		return ret;
1613 
1614 	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
1615 	if (&obj->base == NULL) {
1616 		ret = -ENOENT;
1617 		goto unlock;
1618 	}
1619 
1620 	/* Try to flush the object off the GPU without holding the lock.
1621 	 * We will repeat the flush holding the lock in the normal manner
1622 	 * to catch cases where we are gazumped.
1623 	 */
1624 	ret = i915_gem_object_wait_rendering__nonblocking(obj,
1625 							  to_rps_client(file),
1626 							  !write_domain);
1627 	if (ret)
1628 		goto unref;
1629 
1630 	if (read_domains & I915_GEM_DOMAIN_GTT)
1631 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1632 	else
1633 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1634 
1635 	if (write_domain != 0)
1636 		intel_fb_obj_invalidate(obj,
1637 					write_domain == I915_GEM_DOMAIN_GTT ?
1638 					ORIGIN_GTT : ORIGIN_CPU);
1639 
1640 unref:
1641 	drm_gem_object_unreference(&obj->base);
1642 unlock:
1643 	mutex_unlock(&dev->struct_mutex);
1644 	return ret;
1645 }
1646 
1647 /**
1648  * Called when user space has done writes to this buffer
1649  */
1650 int
1651 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1652 			 struct drm_file *file)
1653 {
1654 	struct drm_i915_gem_sw_finish *args = data;
1655 	struct drm_i915_gem_object *obj;
1656 	int ret = 0;
1657 
1658 	ret = i915_mutex_lock_interruptible(dev);
1659 	if (ret)
1660 		return ret;
1661 
1662 	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
1663 	if (&obj->base == NULL) {
1664 		ret = -ENOENT;
1665 		goto unlock;
1666 	}
1667 
1668 	/* Pinned buffers may be scanout, so flush the cache */
1669 	if (obj->pin_display)
1670 		i915_gem_object_flush_cpu_write_domain(obj);
1671 
1672 	drm_gem_object_unreference(&obj->base);
1673 unlock:
1674 	mutex_unlock(&dev->struct_mutex);
1675 	return ret;
1676 }
1677 
1678 /**
1679  * Maps the contents of an object, returning the address it is mapped
1680  * into.
1681  *
1682  * While the mapping holds a reference on the contents of the object, it doesn't
1683  * imply a ref on the object itself.
1684  *
1685  * IMPORTANT:
1686  *
1687  * DRM driver writers who look at this function as an example of how to do GEM
1688  * mmap support: please don't implement mmap support like this. The modern way
1689  * to implement DRM mmap support is with an mmap offset ioctl (like
1690  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1691  * That way debug tooling like valgrind will understand what's going on, hiding
1692  * the mmap call in a driver private ioctl will break that. The i915 driver only
1693  * does cpu mmaps this way because we didn't know better.
1694  */
1695 int
1696 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1697 		    struct drm_file *file)
1698 {
1699 	struct drm_i915_gem_mmap *args = data;
1700 	struct drm_gem_object *obj;
1701 	unsigned long addr;
1702 
1703 	if (args->flags & ~(I915_MMAP_WC))
1704 		return -EINVAL;
1705 
1706 	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1707 		return -ENODEV;
1708 
1709 	obj = drm_gem_object_lookup(file, args->handle);
1710 	if (obj == NULL)
1711 		return -ENOENT;
1712 
1713 	/* prime objects have no backing filp to GEM mmap
1714 	 * pages from.
1715 	 */
1716 	if (!obj->filp) {
1717 		drm_gem_object_unreference_unlocked(obj);
1718 		return -EINVAL;
1719 	}
1720 
1721 	addr = vm_mmap(obj->filp, 0, args->size,
1722 		       PROT_READ | PROT_WRITE, MAP_SHARED,
1723 		       args->offset);
1724 	if (args->flags & I915_MMAP_WC) {
1725 		struct mm_struct *mm = current->mm;
1726 		struct vm_area_struct *vma;
1727 
1728 		if (down_write_killable(&mm->mmap_sem)) {
1729 			drm_gem_object_unreference_unlocked(obj);
1730 			return -EINTR;
1731 		}
1732 		vma = find_vma(mm, addr);
1733 		if (vma)
1734 			vma->vm_page_prot =
1735 				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1736 		else
1737 			addr = -ENOMEM;
1738 		up_write(&mm->mmap_sem);
1739 	}
1740 	drm_gem_object_unreference_unlocked(obj);
1741 	if (IS_ERR((void *)addr))
1742 		return addr;
1743 
1744 	args->addr_ptr = (uint64_t) addr;
1745 
1746 	return 0;
1747 }
1748 
1749 /**
1750  * i915_gem_fault - fault a page into the GTT
1751  * @vma: VMA in question
1752  * @vmf: fault info
1753  *
1754  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1755  * from userspace.  The fault handler takes care of binding the object to
1756  * the GTT (if needed), allocating and programming a fence register (again,
1757  * only if needed based on whether the old reg is still valid or the object
1758  * is tiled) and inserting a new PTE into the faulting process.
1759  *
1760  * Note that the faulting process may involve evicting existing objects
1761  * from the GTT and/or fence registers to make room.  So performance may
1762  * suffer if the GTT working set is large or there are few fence registers
1763  * left.
1764  */
1765 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1766 {
1767 	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1768 	struct drm_device *dev = obj->base.dev;
1769 	struct drm_i915_private *dev_priv = to_i915(dev);
1770 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
1771 	struct i915_ggtt_view view = i915_ggtt_view_normal;
1772 	pgoff_t page_offset;
1773 	unsigned long pfn;
1774 	int ret = 0;
1775 	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1776 
1777 	intel_runtime_pm_get(dev_priv);
1778 
1779 	/* We don't use vmf->pgoff since that has the fake offset */
1780 	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1781 		PAGE_SHIFT;
1782 
1783 	ret = i915_mutex_lock_interruptible(dev);
1784 	if (ret)
1785 		goto out;
1786 
1787 	trace_i915_gem_object_fault(obj, page_offset, true, write);
1788 
1789 	/* Try to flush the object off the GPU first without holding the lock.
1790 	 * Upon reacquiring the lock, we will perform our sanity checks and then
1791 	 * repeat the flush holding the lock in the normal manner to catch cases
1792 	 * where we are gazumped.
1793 	 */
1794 	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1795 	if (ret)
1796 		goto unlock;
1797 
1798 	/* Access to snoopable pages through the GTT is incoherent. */
1799 	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1800 		ret = -EFAULT;
1801 		goto unlock;
1802 	}
1803 
1804 	/* Use a partial view if the object is bigger than the aperture. */
1805 	if (obj->base.size >= ggtt->mappable_end &&
1806 	    obj->tiling_mode == I915_TILING_NONE) {
1807 		static const unsigned int chunk_size = 256; /* pages, i.e. 1 MiB */
1808 
1809 		memset(&view, 0, sizeof(view));
1810 		view.type = I915_GGTT_VIEW_PARTIAL;
1811 		view.params.partial.offset = rounddown(page_offset, chunk_size);
1812 		view.params.partial.size =
1813 			min_t(unsigned int,
1814 			      chunk_size,
1815 			      (vma->vm_end - vma->vm_start)/PAGE_SIZE -
1816 			      view.params.partial.offset);
1817 	}
1818 
1819 	/* Now pin it into the GTT if needed */
1820 	ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
1821 	if (ret)
1822 		goto unlock;
1823 
1824 	ret = i915_gem_object_set_to_gtt_domain(obj, write);
1825 	if (ret)
1826 		goto unpin;
1827 
1828 	ret = i915_gem_object_get_fence(obj);
1829 	if (ret)
1830 		goto unpin;
1831 
1832 	/* Finally, remap it using the new GTT offset */
1833 	pfn = ggtt->mappable_base +
1834 		i915_gem_obj_ggtt_offset_view(obj, &view);
1835 	pfn >>= PAGE_SHIFT;
1836 
1837 	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
1838 		/* Overriding existing pages in partial view does not cause
1839 		 * us any trouble as TLBs are still valid because the fault
1840 		 * is due to userspace losing part of the mapping or never
1841 		 * having accessed it before (at this partial view's range).
1842 		 */
1843 		unsigned long base = vma->vm_start +
1844 				     (view.params.partial.offset << PAGE_SHIFT);
1845 		unsigned int i;
1846 
1847 		for (i = 0; i < view.params.partial.size; i++) {
1848 			ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
1849 			if (ret)
1850 				break;
1851 		}
1852 
1853 		obj->fault_mappable = true;
1854 	} else {
1855 		if (!obj->fault_mappable) {
1856 			unsigned long size = min_t(unsigned long,
1857 						   vma->vm_end - vma->vm_start,
1858 						   obj->base.size);
1859 			int i;
1860 
1861 			for (i = 0; i < size >> PAGE_SHIFT; i++) {
1862 				ret = vm_insert_pfn(vma,
1863 						    (unsigned long)vma->vm_start + i * PAGE_SIZE,
1864 						    pfn + i);
1865 				if (ret)
1866 					break;
1867 			}
1868 
1869 			obj->fault_mappable = true;
1870 		} else
1871 			ret = vm_insert_pfn(vma,
1872 					    (unsigned long)vmf->virtual_address,
1873 					    pfn + page_offset);
1874 	}
1875 unpin:
1876 	i915_gem_object_ggtt_unpin_view(obj, &view);
1877 unlock:
1878 	mutex_unlock(&dev->struct_mutex);
1879 out:
1880 	switch (ret) {
1881 	case -EIO:
1882 		/*
1883 		 * We eat errors when the gpu is terminally wedged to avoid
1884 		 * userspace unduly crashing (gl has no provisions for mmaps to
1885 		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1886 		 * and so needs to be reported.
1887 		 */
1888 		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1889 			ret = VM_FAULT_SIGBUS;
1890 			break;
1891 		}
1892 	case -EAGAIN:
1893 		/*
1894 		 * EAGAIN means the gpu is hung and we'll wait for the error
1895 		 * handler to reset everything when re-faulting in
1896 		 * i915_mutex_lock_interruptible.
1897 		 */
1898 	case 0:
1899 	case -ERESTARTSYS:
1900 	case -EINTR:
1901 	case -EBUSY:
1902 		/*
1903 		 * EBUSY is ok: this just means that another thread
1904 		 * already did the job.
1905 		 */
1906 		ret = VM_FAULT_NOPAGE;
1907 		break;
1908 	case -ENOMEM:
1909 		ret = VM_FAULT_OOM;
1910 		break;
1911 	case -ENOSPC:
1912 	case -EFAULT:
1913 		ret = VM_FAULT_SIGBUS;
1914 		break;
1915 	default:
1916 		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1917 		ret = VM_FAULT_SIGBUS;
1918 		break;
1919 	}
1920 
1921 	intel_runtime_pm_put(dev_priv);
1922 	return ret;
1923 }
1924 
1925 /**
1926  * i915_gem_release_mmap - remove physical page mappings
1927  * @obj: obj in question
1928  *
1929  * Preserve the reservation of the mmapping with the DRM core code, but
1930  * relinquish ownership of the pages back to the system.
1931  *
1932  * It is vital that we remove the page mapping if we have mapped a tiled
1933  * object through the GTT and then lose the fence register due to
1934  * resource pressure. Similarly if the object has been moved out of the
1935  * aperture, then pages mapped into userspace must be revoked. Removing the
1936  * mapping will then trigger a page fault on the next user access, allowing
1937  * fixup by i915_gem_fault().
1938  */
1939 void
1940 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1941 {
1942 	/* Serialisation between user GTT access and our code depends upon
1943 	 * revoking the CPU's PTE whilst the mutex is held. The next user
1944 	 * pagefault then has to wait until we release the mutex.
1945 	 */
1946 	lockdep_assert_held(&obj->base.dev->struct_mutex);
1947 
1948 	if (!obj->fault_mappable)
1949 		return;
1950 
1951 	drm_vma_node_unmap(&obj->base.vma_node,
1952 			   obj->base.dev->anon_inode->i_mapping);
1953 
1954 	/* Ensure that the CPU's PTEs are revoked and there are no outstanding
1955 	 * memory transactions from userspace before we return. The TLB
1956 	 * flushing implied by changing the PTEs above *should* be
1957 	 * sufficient, an extra barrier here just provides us with a bit
1958 	 * of paranoid documentation about our requirement to serialise
1959 	 * memory writes before touching registers / GSM.
1960 	 */
1961 	wmb();
1962 
1963 	obj->fault_mappable = false;
1964 }
1965 
1966 void
1967 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1968 {
1969 	struct drm_i915_gem_object *obj;
1970 
1971 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1972 		i915_gem_release_mmap(obj);
1973 }
1974 
1975 uint32_t
1976 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1977 {
1978 	uint32_t gtt_size;
1979 
1980 	if (INTEL_INFO(dev)->gen >= 4 ||
1981 	    tiling_mode == I915_TILING_NONE)
1982 		return size;
1983 
1984 	/* Previous chips need a power-of-two fence region when tiling */
1985 	if (INTEL_INFO(dev)->gen == 3)
1986 		gtt_size = 1024*1024;
1987 	else
1988 		gtt_size = 512*1024;
1989 
1990 	while (gtt_size < size)
1991 		gtt_size <<= 1;
1992 
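	/* For example, a 300 KiB tiled object needs a 1 MiB fence region on
	 * gen3 (the 1 MiB minimum) but only a 512 KiB region on gen2, since
	 * the size is rounded up to the next power of two from the minimum.
	 */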
1993 	return gtt_size;
1994 }
1995 
1996 /**
1997  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1998  * @obj: object to check
1999  *
2000  * Return the required GTT alignment for an object, taking into account
2001  * potential fence register mapping.
2002  */
2003 uint32_t
2004 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
2005 			   int tiling_mode, bool fenced)
2006 {
2007 	/*
2008 	 * Minimum alignment is 4k (GTT page size), but might be greater
2009 	 * if a fence register is needed for the object.
2010 	 */
2011 	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
2012 	    tiling_mode == I915_TILING_NONE)
2013 		return 4096;
2014 
2015 	/*
2016 	 * Previous chips need to be aligned to the size of the smallest
2017 	 * fence register that can contain the object.
2018 	 */
2019 	return i915_gem_get_gtt_size(dev, size, tiling_mode);
2020 }
2021 
2022 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2023 {
2024 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2025 	int ret;
2026 
2027 	dev_priv->mm.shrinker_no_lock_stealing = true;
2028 
2029 	ret = drm_gem_create_mmap_offset(&obj->base);
2030 	if (ret != -ENOSPC)
2031 		goto out;
2032 
2033 	/* Badly fragmented mmap space? The only way we can recover
2034 	 * space is by destroying unwanted objects. We can't randomly release
2035 	 * mmap_offsets as userspace expects them to be persistent for the
2036 	 * lifetime of the objects. The closest we can do is to release the
2037 	 * offsets on purgeable objects by truncating them and marking them purged,
2038 	 * which prevents userspace from ever using that object again.
2039 	 */
2040 	i915_gem_shrink(dev_priv,
2041 			obj->base.size >> PAGE_SHIFT,
2042 			I915_SHRINK_BOUND |
2043 			I915_SHRINK_UNBOUND |
2044 			I915_SHRINK_PURGEABLE);
2045 	ret = drm_gem_create_mmap_offset(&obj->base);
2046 	if (ret != -ENOSPC)
2047 		goto out;
2048 
2049 	i915_gem_shrink_all(dev_priv);
2050 	ret = drm_gem_create_mmap_offset(&obj->base);
2051 out:
2052 	dev_priv->mm.shrinker_no_lock_stealing = false;
2053 
2054 	return ret;
2055 }
2056 
2057 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2058 {
2059 	drm_gem_free_mmap_offset(&obj->base);
2060 }
2061 
2062 int
2063 i915_gem_mmap_gtt(struct drm_file *file,
2064 		  struct drm_device *dev,
2065 		  uint32_t handle,
2066 		  uint64_t *offset)
2067 {
2068 	struct drm_i915_gem_object *obj;
2069 	int ret;
2070 
2071 	ret = i915_mutex_lock_interruptible(dev);
2072 	if (ret)
2073 		return ret;
2074 
2075 	obj = to_intel_bo(drm_gem_object_lookup(file, handle));
2076 	if (&obj->base == NULL) {
2077 		ret = -ENOENT;
2078 		goto unlock;
2079 	}
2080 
2081 	if (obj->madv != I915_MADV_WILLNEED) {
2082 		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
2083 		ret = -EFAULT;
2084 		goto out;
2085 	}
2086 
2087 	ret = i915_gem_object_create_mmap_offset(obj);
2088 	if (ret)
2089 		goto out;
2090 
2091 	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2092 
2093 out:
2094 	drm_gem_object_unreference(&obj->base);
2095 unlock:
2096 	mutex_unlock(&dev->struct_mutex);
2097 	return ret;
2098 }
2099 
2100 /**
2101  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2102  * @dev: DRM device
2103  * @data: GTT mapping ioctl data
2104  * @file: GEM object info
2105  *
2106  * Simply returns the fake offset to userspace so it can mmap it.
2107  * The mmap call will end up in drm_gem_mmap(), which will set things
2108  * up so we can get faults in the handler above.
2109  *
2110  * The fault handler will take care of binding the object into the GTT
2111  * (since it may have been evicted to make room for something), allocating
2112  * a fence register, and mapping the appropriate aperture address into
2113  * userspace.
2114  */
2115 int
2116 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2117 			struct drm_file *file)
2118 {
2119 	struct drm_i915_gem_mmap_gtt *args = data;
2120 
2121 	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2122 }
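/* Illustrative userspace flow (sketch, not part of this driver): the caller
 * fetches the fake offset with DRM_IOCTL_I915_GEM_MMAP_GTT and then mmaps
 * the DRM fd at that offset:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * Faulting on ptr then lands in i915_gem_fault() above.
 */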
2123 
2124 /* Immediately discard the backing storage */
2125 static void
2126 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2127 {
2128 	i915_gem_object_free_mmap_offset(obj);
2129 
2130 	if (obj->base.filp == NULL)
2131 		return;
2132 
2133 	/* Our goal here is to return as much of the memory back to the
2134 	 * system as possible, as we are called from the OOM path.
2135 	 * To do this we must instruct the shmfs to drop all of its
2136 	 * backing pages, *now*.
2137 	 */
2138 	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2139 	obj->madv = __I915_MADV_PURGED;
2140 }
2141 
2142 /* Try to discard unwanted pages */
2143 static void
2144 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2145 {
2146 	struct address_space *mapping;
2147 
2148 	switch (obj->madv) {
2149 	case I915_MADV_DONTNEED:
2150 		i915_gem_object_truncate(obj);
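		/* fall through - once truncated, the object is purged */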
2151 	case __I915_MADV_PURGED:
2152 		return;
2153 	}
2154 
2155 	if (obj->base.filp == NULL)
2156 		return;
2157 
2158 	mapping = file_inode(obj->base.filp)->i_mapping;
2159 	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2160 }
2161 
2162 static void
2163 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2164 {
2165 	struct sg_page_iter sg_iter;
2166 	int ret;
2167 
2168 	BUG_ON(obj->madv == __I915_MADV_PURGED);
2169 
2170 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
2171 	if (WARN_ON(ret)) {
2172 		/* In the event of a disaster, abandon all caches and
2173 		 * hope for the best.
2174 		 */
2175 		i915_gem_clflush_object(obj, true);
2176 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2177 	}
2178 
2179 	i915_gem_gtt_finish_object(obj);
2180 
2181 	if (i915_gem_object_needs_bit17_swizzle(obj))
2182 		i915_gem_object_save_bit_17_swizzle(obj);
2183 
2184 	if (obj->madv == I915_MADV_DONTNEED)
2185 		obj->dirty = 0;
2186 
2187 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
2188 		struct page *page = sg_page_iter_page(&sg_iter);
2189 
2190 		if (obj->dirty)
2191 			set_page_dirty(page);
2192 
2193 		if (obj->madv == I915_MADV_WILLNEED)
2194 			mark_page_accessed(page);
2195 
2196 		put_page(page);
2197 	}
2198 	obj->dirty = 0;
2199 
2200 	sg_free_table(obj->pages);
2201 	kfree(obj->pages);
2202 }
2203 
2204 int
2205 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2206 {
2207 	const struct drm_i915_gem_object_ops *ops = obj->ops;
2208 
2209 	if (obj->pages == NULL)
2210 		return 0;
2211 
2212 	if (obj->pages_pin_count)
2213 		return -EBUSY;
2214 
2215 	BUG_ON(i915_gem_obj_bound_any(obj));
2216 
2217 	/* ->put_pages might need to allocate memory for the bit17 swizzle
2218 	 * array, hence protect them from being reaped by removing them from gtt
2219 	 * lists early. */
2220 	list_del(&obj->global_list);
2221 
2222 	if (obj->mapping) {
2223 		if (is_vmalloc_addr(obj->mapping))
2224 			vunmap(obj->mapping);
2225 		else
2226 			kunmap(kmap_to_page(obj->mapping));
2227 		obj->mapping = NULL;
2228 	}
2229 
2230 	ops->put_pages(obj);
2231 	obj->pages = NULL;
2232 
2233 	i915_gem_object_invalidate(obj);
2234 
2235 	return 0;
2236 }
2237 
2238 static int
2239 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2240 {
2241 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2242 	int page_count, i;
2243 	struct address_space *mapping;
2244 	struct sg_table *st;
2245 	struct scatterlist *sg;
2246 	struct sg_page_iter sg_iter;
2247 	struct page *page;
2248 	unsigned long last_pfn = 0;	/* suppress gcc warning */
2249 	int ret;
2250 	gfp_t gfp;
2251 
2252 	/* Assert that the object is not currently in any GPU domain. As it
2253 	 * wasn't in the GTT, there shouldn't be any way it could have been in
2254 	 * a GPU cache
2255 	 */
2256 	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2257 	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2258 
2259 	st = kmalloc(sizeof(*st), GFP_KERNEL);
2260 	if (st == NULL)
2261 		return -ENOMEM;
2262 
2263 	page_count = obj->base.size / PAGE_SIZE;
2264 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2265 		kfree(st);
2266 		return -ENOMEM;
2267 	}
2268 
2269 	/* Get the list of pages out of our struct file.  They'll be pinned
2270 	 * at this point until we release them.
2271 	 *
2272 	 * Fail silently without starting the shrinker
2273 	 */
2274 	mapping = file_inode(obj->base.filp)->i_mapping;
2275 	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2276 	gfp |= __GFP_NORETRY | __GFP_NOWARN;
2277 	sg = st->sgl;
2278 	st->nents = 0;
2279 	for (i = 0; i < page_count; i++) {
2280 		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2281 		if (IS_ERR(page)) {
2282 			i915_gem_shrink(dev_priv,
2283 					page_count,
2284 					I915_SHRINK_BOUND |
2285 					I915_SHRINK_UNBOUND |
2286 					I915_SHRINK_PURGEABLE);
2287 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2288 		}
2289 		if (IS_ERR(page)) {
2290 			/* We've tried hard to allocate the memory by reaping
2291 			 * our own buffer, now let the real VM do its job and
2292 			 * go down in flames if truly OOM.
2293 			 */
2294 			i915_gem_shrink_all(dev_priv);
2295 			page = shmem_read_mapping_page(mapping, i);
2296 			if (IS_ERR(page)) {
2297 				ret = PTR_ERR(page);
2298 				goto err_pages;
2299 			}
2300 		}
2301 #ifdef CONFIG_SWIOTLB
2302 		if (swiotlb_nr_tbl()) {
2303 			st->nents++;
2304 			sg_set_page(sg, page, PAGE_SIZE, 0);
2305 			sg = sg_next(sg);
2306 			continue;
2307 		}
2308 #endif
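		/* Coalesce runs of physically contiguous pages into a single
		 * sg entry to keep the scatterlist compact.
		 */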
2309 		if (!i || page_to_pfn(page) != last_pfn + 1) {
2310 			if (i)
2311 				sg = sg_next(sg);
2312 			st->nents++;
2313 			sg_set_page(sg, page, PAGE_SIZE, 0);
2314 		} else {
2315 			sg->length += PAGE_SIZE;
2316 		}
2317 		last_pfn = page_to_pfn(page);
2318 
2319 		/* Check that the i965g/gm workaround works. */
2320 		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2321 	}
2322 #ifdef CONFIG_SWIOTLB
2323 	if (!swiotlb_nr_tbl())
2324 #endif
2325 		sg_mark_end(sg);
2326 	obj->pages = st;
2327 
2328 	ret = i915_gem_gtt_prepare_object(obj);
2329 	if (ret)
2330 		goto err_pages;
2331 
2332 	if (i915_gem_object_needs_bit17_swizzle(obj))
2333 		i915_gem_object_do_bit_17_swizzle(obj);
2334 
2335 	if (obj->tiling_mode != I915_TILING_NONE &&
2336 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2337 		i915_gem_object_pin_pages(obj);
2338 
2339 	return 0;
2340 
2341 err_pages:
2342 	sg_mark_end(sg);
2343 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2344 		put_page(sg_page_iter_page(&sg_iter));
2345 	sg_free_table(st);
2346 	kfree(st);
2347 
2348 	/* shmemfs first checks if there is enough memory to allocate the page
2349 	 * and reports ENOSPC should there be insufficient, along with the usual
2350 	 * and reports ENOSPC should there be insufficient memory, along with the usual
2351 	 *
2352 	 * We use ENOSPC in our driver to mean that we have run out of aperture
2353 	 * space and so want to translate the error from shmemfs back to our
2354 	 * usual understanding of ENOMEM.
2355 	 */
2356 	if (ret == -ENOSPC)
2357 		ret = -ENOMEM;
2358 
2359 	return ret;
2360 }
2361 
2362 /* Ensure that the associated pages are gathered from the backing storage
2363  * and pinned into our object. i915_gem_object_get_pages() may be called
2364  * multiple times before they are released by a single call to
2365  * i915_gem_object_put_pages() - once the pages are no longer referenced
2366  * either as a result of memory pressure (reaping pages under the shrinker)
2367  * or as the object is itself released.
2368  */
2369 int
2370 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2371 {
2372 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2373 	const struct drm_i915_gem_object_ops *ops = obj->ops;
2374 	int ret;
2375 
2376 	if (obj->pages)
2377 		return 0;
2378 
2379 	if (obj->madv != I915_MADV_WILLNEED) {
2380 		DRM_DEBUG("Attempting to obtain a purgeable object\n");
2381 		return -EFAULT;
2382 	}
2383 
2384 	BUG_ON(obj->pages_pin_count);
2385 
2386 	ret = ops->get_pages(obj);
2387 	if (ret)
2388 		return ret;
2389 
2390 	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2391 
2392 	obj->get_page.sg = obj->pages->sgl;
2393 	obj->get_page.last = 0;
2394 
2395 	return 0;
2396 }
2397 
2398 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2399 {
2400 	int ret;
2401 
2402 	lockdep_assert_held(&obj->base.dev->struct_mutex);
2403 
2404 	ret = i915_gem_object_get_pages(obj);
2405 	if (ret)
2406 		return ERR_PTR(ret);
2407 
2408 	i915_gem_object_pin_pages(obj);
2409 
2410 	if (obj->mapping == NULL) {
2411 		struct page **pages;
2412 
2413 		pages = NULL;
2414 		if (obj->base.size == PAGE_SIZE)
2415 			obj->mapping = kmap(sg_page(obj->pages->sgl));
2416 		else
2417 			pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
2418 					       sizeof(*pages),
2419 					       GFP_TEMPORARY);
2420 		if (pages != NULL) {
2421 			struct sg_page_iter sg_iter;
2422 			int n;
2423 
2424 			n = 0;
2425 			for_each_sg_page(obj->pages->sgl, &sg_iter,
2426 					 obj->pages->nents, 0)
2427 				pages[n++] = sg_page_iter_page(&sg_iter);
2428 
2429 			obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
2430 			drm_free_large(pages);
2431 		}
2432 		if (obj->mapping == NULL) {
2433 			i915_gem_object_unpin_pages(obj);
2434 			return ERR_PTR(-ENOMEM);
2435 		}
2436 	}
2437 
2438 	return obj->mapping;
2439 }
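/* Typical usage (illustrative sketch): with struct_mutex held, map the object
 * once and reuse the cached obj->mapping for CPU access:
 *
 *	void *vaddr = i915_gem_object_pin_map(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *
 * dropping the page pin again once the CPU access is complete.
 */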
2440 
2441 void i915_vma_move_to_active(struct i915_vma *vma,
2442 			     struct drm_i915_gem_request *req)
2443 {
2444 	struct drm_i915_gem_object *obj = vma->obj;
2445 	struct intel_engine_cs *engine;
2446 
2447 	engine = i915_gem_request_get_engine(req);
2448 
2449 	/* Add a reference if we're newly entering the active list. */
2450 	if (obj->active == 0)
2451 		drm_gem_object_reference(&obj->base);
2452 	obj->active |= intel_engine_flag(engine);
2453 
2454 	list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
2455 	i915_gem_request_assign(&obj->last_read_req[engine->id], req);
2456 
2457 	list_move_tail(&vma->vm_link, &vma->vm->active_list);
2458 }
2459 
2460 static void
2461 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
2462 {
2463 	GEM_BUG_ON(obj->last_write_req == NULL);
2464 	GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
2465 
2466 	i915_gem_request_assign(&obj->last_write_req, NULL);
2467 	intel_fb_obj_flush(obj, true, ORIGIN_CS);
2468 }
2469 
2470 static void
2471 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
2472 {
2473 	struct i915_vma *vma;
2474 
2475 	GEM_BUG_ON(obj->last_read_req[ring] == NULL);
2476 	GEM_BUG_ON(!(obj->active & (1 << ring)));
2477 
2478 	list_del_init(&obj->engine_list[ring]);
2479 	i915_gem_request_assign(&obj->last_read_req[ring], NULL);
2480 
2481 	if (obj->last_write_req && obj->last_write_req->engine->id == ring)
2482 		i915_gem_object_retire__write(obj);
2483 
2484 	obj->active &= ~(1 << ring);
2485 	if (obj->active)
2486 		return;
2487 
2488 	/* Bump our place on the bound list to keep it roughly in LRU order
2489 	 * so that we don't steal from recently used but inactive objects
2490 	 * (unless we are forced to, of course!)
2491 	 */
2492 	list_move_tail(&obj->global_list,
2493 		       &to_i915(obj->base.dev)->mm.bound_list);
2494 
2495 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
2496 		if (!list_empty(&vma->vm_link))
2497 			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
2498 	}
2499 
2500 	i915_gem_request_assign(&obj->last_fenced_req, NULL);
2501 	drm_gem_object_unreference(&obj->base);
2502 }
2503 
2504 static int
2505 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2506 {
2507 	struct drm_i915_private *dev_priv = dev->dev_private;
2508 	struct intel_engine_cs *engine;
2509 	int ret;
2510 
2511 	/* Carefully retire all requests without writing to the rings */
2512 	for_each_engine(engine, dev_priv) {
2513 		ret = intel_engine_idle(engine);
2514 		if (ret)
2515 			return ret;
2516 	}
2517 	i915_gem_retire_requests(dev);
2518 
2519 	/* Finally reset hw state */
2520 	for_each_engine(engine, dev_priv)
2521 		intel_ring_init_seqno(engine, seqno);
2522 
2523 	return 0;
2524 }
2525 
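/* Exposed (e.g. via the i915_next_seqno debugfs file) so that seqno wraparound
 * handling can be exercised without waiting for the counter to wrap naturally.
 */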
2526 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2527 {
2528 	struct drm_i915_private *dev_priv = dev->dev_private;
2529 	int ret;
2530 
2531 	if (seqno == 0)
2532 		return -EINVAL;
2533 
2534 	/* HWS page needs to be set less than what we
2535 	 * will inject to ring
2536 	 */
2537 	ret = i915_gem_init_seqno(dev, seqno - 1);
2538 	if (ret)
2539 		return ret;
2540 
2541 	/* Carefully set the last_seqno value so that wrap
2542 	 * detection still works
2543 	 */
2544 	dev_priv->next_seqno = seqno;
2545 	dev_priv->last_seqno = seqno - 1;
2546 	if (dev_priv->last_seqno == 0)
2547 		dev_priv->last_seqno--;
2548 
2549 	return 0;
2550 }
2551 
2552 int
2553 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2554 {
2555 	struct drm_i915_private *dev_priv = dev->dev_private;
2556 
2557 	/* reserve 0 for non-seqno */
2558 	if (dev_priv->next_seqno == 0) {
2559 		int ret = i915_gem_init_seqno(dev, 0);
2560 		if (ret)
2561 			return ret;
2562 
2563 		dev_priv->next_seqno = 1;
2564 	}
2565 
2566 	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2567 	return 0;
2568 }
2569 
2570 /*
2571  * NB: This function is not allowed to fail. Doing so would mean that the
2572  * request is not being tracked for completion but the work itself is
2573  * going to happen on the hardware. This would be a Bad Thing(tm).
2574  */
2575 void __i915_add_request(struct drm_i915_gem_request *request,
2576 			struct drm_i915_gem_object *obj,
2577 			bool flush_caches)
2578 {
2579 	struct intel_engine_cs *engine;
2580 	struct drm_i915_private *dev_priv;
2581 	struct intel_ringbuffer *ringbuf;
2582 	u32 request_start;
2583 	int ret;
2584 
2585 	if (WARN_ON(request == NULL))
2586 		return;
2587 
2588 	engine = request->engine;
2589 	dev_priv = request->i915;
2590 	ringbuf = request->ringbuf;
2591 
2592 	/*
2593 	 * To ensure that this call will not fail, space for its emissions
2594 	 * should already have been reserved in the ring buffer. Let the ring
2595 	 * know that it is time to use that space up.
2596 	 */
2597 	intel_ring_reserved_space_use(ringbuf);
2598 
2599 	request_start = intel_ring_get_tail(ringbuf);
2600 	/*
2601 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
2602 	 * after having emitted the batchbuffer command. Hence we need to fix
2603 	 * things up similar to emitting the lazy request. The difference here
2604 	 * is that the flush _must_ happen before the next request, no matter
2605 	 * what.
2606 	 */
2607 	if (flush_caches) {
2608 		if (i915.enable_execlists)
2609 			ret = logical_ring_flush_all_caches(request);
2610 		else
2611 			ret = intel_ring_flush_all_caches(request);
2612 		/* Not allowed to fail! */
2613 		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
2614 	}
2615 
2616 	trace_i915_gem_request_add(request);
2617 
2618 	request->head = request_start;
2619 
2620 	/* Whilst this request exists, batch_obj will be on the
2621 	 * active_list, and so will hold the active reference. Only when this
2622 	 * request is retired will the the batch_obj be moved onto the
2623 	 * request is retired will the batch_obj be moved onto the
2624 	 * to explicitly hold another reference here.
2625 	 */
2626 	request->batch_obj = obj;
2627 
2628 	/* Seal the request and mark it as pending execution. Note that
2629 	 * we may inspect this state, without holding any locks, during
2630 	 * hangcheck. Hence we apply the barrier to ensure that we do not
2631 	 * see a more recent value in the hws than we are tracking.
2632 	 */
2633 	request->emitted_jiffies = jiffies;
2634 	request->previous_seqno = engine->last_submitted_seqno;
2635 	smp_store_mb(engine->last_submitted_seqno, request->seqno);
2636 	list_add_tail(&request->list, &engine->request_list);
2637 
2638 	/* Record the position of the start of the request so that
2639 	 * should we detect the updated seqno part-way through the
2640 	 * GPU processing the request, we never over-estimate the
2641 	 * position of the head.
2642 	 */
2643 	request->postfix = intel_ring_get_tail(ringbuf);
2644 
2645 	if (i915.enable_execlists)
2646 		ret = engine->emit_request(request);
2647 	else {
2648 		ret = engine->add_request(request);
2649 
2650 		request->tail = intel_ring_get_tail(ringbuf);
2651 	}
2652 	/* Not allowed to fail! */
2653 	WARN(ret, "emit|add_request failed: %d!\n", ret);
2654 
2655 	i915_queue_hangcheck(engine->dev);
2656 
2657 	queue_delayed_work(dev_priv->wq,
2658 			   &dev_priv->mm.retire_work,
2659 			   round_jiffies_up_relative(HZ));
2660 	intel_mark_busy(dev_priv->dev);
2661 
2662 	/* Sanity check that the reserved size was large enough. */
2663 	intel_ring_reserved_space_end(ringbuf);
2664 }
2665 
2666 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2667 				   const struct intel_context *ctx)
2668 {
2669 	unsigned long elapsed;
2670 
2671 	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2672 
2673 	if (ctx->hang_stats.banned)
2674 		return true;
2675 
2676 	if (ctx->hang_stats.ban_period_seconds &&
2677 	    elapsed <= ctx->hang_stats.ban_period_seconds) {
2678 		if (!i915_gem_context_is_default(ctx)) {
2679 			DRM_DEBUG("context hanging too fast, banning!\n");
2680 			return true;
2681 		} else if (i915_stop_ring_allow_ban(dev_priv)) {
2682 			if (i915_stop_ring_allow_warn(dev_priv))
2683 				DRM_ERROR("gpu hanging too fast, banning!\n");
2684 			return true;
2685 		}
2686 	}
2687 
2688 	return false;
2689 }
2690 
2691 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2692 				  struct intel_context *ctx,
2693 				  const bool guilty)
2694 {
2695 	struct i915_ctx_hang_stats *hs;
2696 
2697 	if (WARN_ON(!ctx))
2698 		return;
2699 
2700 	hs = &ctx->hang_stats;
2701 
2702 	if (guilty) {
2703 		hs->banned = i915_context_is_banned(dev_priv, ctx);
2704 		hs->batch_active++;
2705 		hs->guilty_ts = get_seconds();
2706 	} else {
2707 		hs->batch_pending++;
2708 	}
2709 }
2710 
2711 void i915_gem_request_free(struct kref *req_ref)
2712 {
2713 	struct drm_i915_gem_request *req = container_of(req_ref,
2714 						 typeof(*req), ref);
2715 	struct intel_context *ctx = req->ctx;
2716 
2717 	if (req->file_priv)
2718 		i915_gem_request_remove_from_client(req);
2719 
2720 	if (ctx) {
2721 		if (i915.enable_execlists && ctx != req->i915->kernel_context)
2722 			intel_lr_context_unpin(ctx, req->engine);
2723 
2724 		i915_gem_context_unreference(ctx);
2725 	}
2726 
2727 	kmem_cache_free(req->i915->requests, req);
2728 }
2729 
2730 static inline int
2731 __i915_gem_request_alloc(struct intel_engine_cs *engine,
2732 			 struct intel_context *ctx,
2733 			 struct drm_i915_gem_request **req_out)
2734 {
2735 	struct drm_i915_private *dev_priv = to_i915(engine->dev);
2736 	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
2737 	struct drm_i915_gem_request *req;
2738 	int ret;
2739 
2740 	if (!req_out)
2741 		return -EINVAL;
2742 
2743 	*req_out = NULL;
2744 
2745 	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
2746 	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
2747 	 * and restart.
2748 	 */
2749 	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
2750 	if (ret)
2751 		return ret;
2752 
2753 	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
2754 	if (req == NULL)
2755 		return -ENOMEM;
2756 
2757 	ret = i915_gem_get_seqno(engine->dev, &req->seqno);
2758 	if (ret)
2759 		goto err;
2760 
2761 	kref_init(&req->ref);
2762 	req->i915 = dev_priv;
2763 	req->engine = engine;
2764 	req->reset_counter = reset_counter;
2765 	req->ctx  = ctx;
2766 	i915_gem_context_reference(req->ctx);
2767 
2768 	if (i915.enable_execlists)
2769 		ret = intel_logical_ring_alloc_request_extras(req);
2770 	else
2771 		ret = intel_ring_alloc_request_extras(req);
2772 	if (ret) {
2773 		i915_gem_context_unreference(req->ctx);
2774 		goto err;
2775 	}
2776 
2777 	/*
2778 	 * Reserve space in the ring buffer for all the commands required to
2779 	 * eventually emit this request. This is to guarantee that the
2780 	 * i915_add_request() call can't fail. Note that the reserve may need
2781 	 * to be redone if the request is not actually submitted straight
2782 	 * away, e.g. because a GPU scheduler has deferred it.
2783 	 */
2784 	if (i915.enable_execlists)
2785 		ret = intel_logical_ring_reserve_space(req);
2786 	else
2787 		ret = intel_ring_reserve_space(req);
2788 	if (ret) {
2789 		/*
2790 		 * At this point, the request is fully allocated even if not
2791 		 * fully prepared. Thus it can be cleaned up using the proper
2792 		 * free code.
2793 		 */
2794 		intel_ring_reserved_space_cancel(req->ringbuf);
2795 		i915_gem_request_unreference(req);
2796 		return ret;
2797 	}
2798 
2799 	*req_out = req;
2800 	return 0;
2801 
2802 err:
2803 	kmem_cache_free(dev_priv->requests, req);
2804 	return ret;
2805 }
2806 
2807 /**
2808  * i915_gem_request_alloc - allocate a request structure
2809  *
2810  * @engine: engine that we wish to issue the request on.
2811  * @ctx: context that the request will be associated with.
2812  *       This can be NULL if the request is not directly related to
2813  *       any specific user context, in which case this function will
2814  *       choose an appropriate context to use.
2815  *
2816  * Returns a pointer to the allocated request if successful,
2817  * or an error code if not.
2818  */
2819 struct drm_i915_gem_request *
2820 i915_gem_request_alloc(struct intel_engine_cs *engine,
2821 		       struct intel_context *ctx)
2822 {
2823 	struct drm_i915_gem_request *req;
2824 	int err;
2825 
2826 	if (ctx == NULL)
2827 		ctx = to_i915(engine->dev)->kernel_context;
2828 	err = __i915_gem_request_alloc(engine, ctx, &req);
2829 	return err ? ERR_PTR(err) : req;
2830 }
2831 
2832 struct drm_i915_gem_request *
2833 i915_gem_find_active_request(struct intel_engine_cs *engine)
2834 {
2835 	struct drm_i915_gem_request *request;
2836 
2837 	list_for_each_entry(request, &engine->request_list, list) {
2838 		if (i915_gem_request_completed(request, false))
2839 			continue;
2840 
2841 		return request;
2842 	}
2843 
2844 	return NULL;
2845 }
2846 
2847 static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
2848 				       struct intel_engine_cs *engine)
2849 {
2850 	struct drm_i915_gem_request *request;
2851 	bool ring_hung;
2852 
2853 	request = i915_gem_find_active_request(engine);
2854 
2855 	if (request == NULL)
2856 		return;
2857 
2858 	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2859 
2860 	i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2861 
2862 	list_for_each_entry_continue(request, &engine->request_list, list)
2863 		i915_set_reset_status(dev_priv, request->ctx, false);
2864 }
2865 
2866 static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
2867 					struct intel_engine_cs *engine)
2868 {
2869 	struct intel_ringbuffer *buffer;
2870 
2871 	while (!list_empty(&engine->active_list)) {
2872 		struct drm_i915_gem_object *obj;
2873 
2874 		obj = list_first_entry(&engine->active_list,
2875 				       struct drm_i915_gem_object,
2876 				       engine_list[engine->id]);
2877 
2878 		i915_gem_object_retire__read(obj, engine->id);
2879 	}
2880 
2881 	/*
2882 	 * Clear the execlists queue up before freeing the requests, as those
2883 	 * are the ones that keep the context and ringbuffer backing objects
2884 	 * pinned in place.
2885 	 */
2886 
2887 	if (i915.enable_execlists) {
2888 		/* Ensure irq handler finishes or is cancelled. */
2889 		tasklet_kill(&engine->irq_tasklet);
2890 
2891 		spin_lock_bh(&engine->execlist_lock);
2892 		/* list_splice_tail_init checks for empty lists */
2893 		list_splice_tail_init(&engine->execlist_queue,
2894 				      &engine->execlist_retired_req_list);
2895 		spin_unlock_bh(&engine->execlist_lock);
2896 
2897 		intel_execlists_retire_requests(engine);
2898 	}
2899 
2900 	/*
2901 	 * We must free the requests after all the corresponding objects have
2902 	 * been moved off active lists. Which is the same order as the normal
2903 	 * retire_requests function does. This is important if objects hold
2904 	 * implicit references on things like e.g. ppgtt address spaces through
2905 	 * the request.
2906 	 */
2907 	while (!list_empty(&engine->request_list)) {
2908 		struct drm_i915_gem_request *request;
2909 
2910 		request = list_first_entry(&engine->request_list,
2911 					   struct drm_i915_gem_request,
2912 					   list);
2913 
2914 		i915_gem_request_retire(request);
2915 	}
2916 
2917 	/* Having flushed all requests from all queues, we know that all
2918 	 * ringbuffers must now be empty. However, since we do not reclaim
2919 	 * all space when retiring the request (to prevent HEADs colliding
2920 	 * with rapid ringbuffer wraparound) the amount of available space
2921 	 * upon reset is less than when we start. Do one more pass over
2922 	 * all the ringbuffers to reset last_retired_head.
2923 	 */
2924 	list_for_each_entry(buffer, &engine->buffers, link) {
2925 		buffer->last_retired_head = buffer->tail;
2926 		intel_ring_update_space(buffer);
2927 	}
2928 
2929 	intel_ring_init_seqno(engine, engine->last_submitted_seqno);
2930 }
2931 
2932 void i915_gem_reset(struct drm_device *dev)
2933 {
2934 	struct drm_i915_private *dev_priv = dev->dev_private;
2935 	struct intel_engine_cs *engine;
2936 
2937 	/*
2938 	 * Before we free the objects from the requests, we need to inspect
2939 	 * them for finding the guilty party. As the requests only borrow
2940 	 * their reference to the objects, the inspection must be done first.
2941 	 */
2942 	for_each_engine(engine, dev_priv)
2943 		i915_gem_reset_engine_status(dev_priv, engine);
2944 
2945 	for_each_engine(engine, dev_priv)
2946 		i915_gem_reset_engine_cleanup(dev_priv, engine);
2947 
2948 	i915_gem_context_reset(dev);
2949 
2950 	i915_gem_restore_fences(dev);
2951 
2952 	WARN_ON(i915_verify_lists(dev));
2953 }
2954 
2955 /**
2956  * This function clears the request list as sequence numbers are passed.
2957  */
2958 void
2959 i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
2960 {
2961 	WARN_ON(i915_verify_lists(engine->dev));
2962 
2963 	/* Retire requests first as we use it above for the early return.
2964 	 * If we retire requests last, we may use a later seqno and so clear
2965 	 * the requests lists without clearing the active list, leading to
2966 	 * confusion.
2967 	 */
2968 	while (!list_empty(&engine->request_list)) {
2969 		struct drm_i915_gem_request *request;
2970 
2971 		request = list_first_entry(&engine->request_list,
2972 					   struct drm_i915_gem_request,
2973 					   list);
2974 
2975 		if (!i915_gem_request_completed(request, true))
2976 			break;
2977 
2978 		i915_gem_request_retire(request);
2979 	}
2980 
2981 	/* Move any buffers on the active list that are no longer referenced
2982 	 * by the ringbuffer to the flushing/inactive lists as appropriate,
2983 	 * before we free the context associated with the requests.
2984 	 */
2985 	while (!list_empty(&engine->active_list)) {
2986 		struct drm_i915_gem_object *obj;
2987 
2988 		obj = list_first_entry(&engine->active_list,
2989 				       struct drm_i915_gem_object,
2990 				       engine_list[engine->id]);
2991 
2992 		if (!list_empty(&obj->last_read_req[engine->id]->list))
2993 			break;
2994 
2995 		i915_gem_object_retire__read(obj, engine->id);
2996 	}
2997 
2998 	if (unlikely(engine->trace_irq_req &&
2999 		     i915_gem_request_completed(engine->trace_irq_req, true))) {
3000 		engine->irq_put(engine);
3001 		i915_gem_request_assign(&engine->trace_irq_req, NULL);
3002 	}
3003 
3004 	WARN_ON(i915_verify_lists(engine->dev));
3005 }
3006 
3007 bool
3008 i915_gem_retire_requests(struct drm_device *dev)
3009 {
3010 	struct drm_i915_private *dev_priv = dev->dev_private;
3011 	struct intel_engine_cs *engine;
3012 	bool idle = true;
3013 
3014 	for_each_engine(engine, dev_priv) {
3015 		i915_gem_retire_requests_ring(engine);
3016 		idle &= list_empty(&engine->request_list);
3017 		if (i915.enable_execlists) {
3018 			spin_lock_bh(&engine->execlist_lock);
3019 			idle &= list_empty(&engine->execlist_queue);
3020 			spin_unlock_bh(&engine->execlist_lock);
3021 
3022 			intel_execlists_retire_requests(engine);
3023 		}
3024 	}
3025 
3026 	if (idle)
3027 		mod_delayed_work(dev_priv->wq,
3028 				   &dev_priv->mm.idle_work,
3029 				   msecs_to_jiffies(100));
3030 
3031 	return idle;
3032 }
3033 
3034 static void
3035 i915_gem_retire_work_handler(struct work_struct *work)
3036 {
3037 	struct drm_i915_private *dev_priv =
3038 		container_of(work, typeof(*dev_priv), mm.retire_work.work);
3039 	struct drm_device *dev = dev_priv->dev;
3040 	bool idle;
3041 
3042 	/* Come back later if the device is busy... */
3043 	idle = false;
3044 	if (mutex_trylock(&dev->struct_mutex)) {
3045 		idle = i915_gem_retire_requests(dev);
3046 		mutex_unlock(&dev->struct_mutex);
3047 	}
3048 	if (!idle)
3049 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
3050 				   round_jiffies_up_relative(HZ));
3051 }
3052 
3053 static void
3054 i915_gem_idle_work_handler(struct work_struct *work)
3055 {
3056 	struct drm_i915_private *dev_priv =
3057 		container_of(work, typeof(*dev_priv), mm.idle_work.work);
3058 	struct drm_device *dev = dev_priv->dev;
3059 	struct intel_engine_cs *engine;
3060 
3061 	for_each_engine(engine, dev_priv)
3062 		if (!list_empty(&engine->request_list))
3063 			return;
3064 
3065 	/* we probably should sync with hangcheck here, using cancel_work_sync.
3066 	 * Also locking seems to be fubar here, engine->request_list is protected
3067 	 * by dev->struct_mutex. */
3068 
3069 	intel_mark_idle(dev);
3070 
3071 	if (mutex_trylock(&dev->struct_mutex)) {
3072 		for_each_engine(engine, dev_priv)
3073 			i915_gem_batch_pool_fini(&engine->batch_pool);
3074 
3075 		mutex_unlock(&dev->struct_mutex);
3076 	}
3077 }
3078 
3079 /**
3080  * Ensures that an object will eventually get non-busy by flushing any required
3081  * write domains, emitting any outstanding lazy request and retiring and
3082  * completed requests.
3083  */
3084 static int
3085 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
3086 {
3087 	int i;
3088 
3089 	if (!obj->active)
3090 		return 0;
3091 
3092 	for (i = 0; i < I915_NUM_ENGINES; i++) {
3093 		struct drm_i915_gem_request *req;
3094 
3095 		req = obj->last_read_req[i];
3096 		if (req == NULL)
3097 			continue;
3098 
3099 		if (list_empty(&req->list))
3100 			goto retire;
3101 
3102 		if (i915_gem_request_completed(req, true)) {
3103 			__i915_gem_request_retire__upto(req);
3104 retire:
3105 			i915_gem_object_retire__read(obj, i);
3106 		}
3107 	}
3108 
3109 	return 0;
3110 }
3111 
3112 /**
3113  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3114  * @DRM_IOCTL_ARGS: standard ioctl arguments
3115  *
3116  * Returns 0 if successful, else an error is returned with the remaining time in
3117  * the timeout parameter.
3118  *  -ETIME: object is still busy after timeout
3119  *  -ERESTARTSYS: signal interrupted the wait
3120  *  -ENOENT: object doesn't exist
3121  * Also possible, but rare:
3122  *  -EAGAIN: GPU wedged
3123  *  -ENOMEM: damn
3124  *  -ENODEV: Internal IRQ fail
3125  *  -E?: The add request failed
3126  *
3127  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3128  * non-zero timeout parameter the wait ioctl will wait for the given number of
3129  * nanoseconds on an object becoming unbusy. Since the wait itself does so
3130  * without holding struct_mutex the object may become re-busied before this
3131  * function completes. A similar but shorter * race condition exists in the busy
3132  * function completes. A similar but shorter race condition exists in the busy
3133  * ioctl.
3134 int
3135 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3136 {
3137 	struct drm_i915_gem_wait *args = data;
3138 	struct drm_i915_gem_object *obj;
3139 	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
3140 	int i, n = 0;
3141 	int ret;
3142 
3143 	if (args->flags != 0)
3144 		return -EINVAL;
3145 
3146 	ret = i915_mutex_lock_interruptible(dev);
3147 	if (ret)
3148 		return ret;
3149 
3150 	obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
3151 	if (&obj->base == NULL) {
3152 		mutex_unlock(&dev->struct_mutex);
3153 		return -ENOENT;
3154 	}
3155 
3156 	/* Need to make sure the object gets inactive eventually. */
3157 	ret = i915_gem_object_flush_active(obj);
3158 	if (ret)
3159 		goto out;
3160 
3161 	if (!obj->active)
3162 		goto out;
3163 
3164 	/* Do this after OLR check to make sure we make forward progress polling
3165 	 * on this IOCTL with a timeout == 0 (like busy ioctl)
3166 	 */
3167 	if (args->timeout_ns == 0) {
3168 		ret = -ETIME;
3169 		goto out;
3170 	}
3171 
3172 	drm_gem_object_unreference(&obj->base);
3173 
3174 	for (i = 0; i < I915_NUM_ENGINES; i++) {
3175 		if (obj->last_read_req[i] == NULL)
3176 			continue;
3177 
3178 		req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
3179 	}
3180 
3181 	mutex_unlock(&dev->struct_mutex);
3182 
3183 	for (i = 0; i < n; i++) {
3184 		if (ret == 0)
3185 			ret = __i915_wait_request(req[i], true,
3186 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
3187 						  to_rps_client(file));
3188 		i915_gem_request_unreference__unlocked(req[i]);
3189 	}
3190 	return ret;
3191 
3192 out:
3193 	drm_gem_object_unreference(&obj->base);
3194 	mutex_unlock(&dev->struct_mutex);
3195 	return ret;
3196 }
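/* Illustrative userspace usage (sketch, assuming the standard i915 uapi): poll
 * for idleness with a zero timeout, or block for up to timeout_ns:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000,
 *	};
 *	ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * On -ETIME the object is still busy and timeout_ns holds the remaining time.
 */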
3197 
3198 static int
3199 __i915_gem_object_sync(struct drm_i915_gem_object *obj,
3200 		       struct intel_engine_cs *to,
3201 		       struct drm_i915_gem_request *from_req,
3202 		       struct drm_i915_gem_request **to_req)
3203 {
3204 	struct intel_engine_cs *from;
3205 	int ret;
3206 
3207 	from = i915_gem_request_get_engine(from_req);
3208 	if (to == from)
3209 		return 0;
3210 
3211 	if (i915_gem_request_completed(from_req, true))
3212 		return 0;
3213 
3214 	if (!i915_semaphore_is_enabled(obj->base.dev)) {
3215 		struct drm_i915_private *i915 = to_i915(obj->base.dev);
3216 		ret = __i915_wait_request(from_req,
3217 					  i915->mm.interruptible,
3218 					  NULL,
3219 					  &i915->rps.semaphores);
3220 		if (ret)
3221 			return ret;
3222 
3223 		i915_gem_object_retire_request(obj, from_req);
3224 	} else {
3225 		int idx = intel_ring_sync_index(from, to);
3226 		u32 seqno = i915_gem_request_get_seqno(from_req);
3227 
3228 		WARN_ON(!to_req);
3229 
3230 		if (seqno <= from->semaphore.sync_seqno[idx])
3231 			return 0;
3232 
3233 		if (*to_req == NULL) {
3234 			struct drm_i915_gem_request *req;
3235 
3236 			req = i915_gem_request_alloc(to, NULL);
3237 			if (IS_ERR(req))
3238 				return PTR_ERR(req);
3239 
3240 			*to_req = req;
3241 		}
3242 
3243 		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
3244 		ret = to->semaphore.sync_to(*to_req, from, seqno);
3245 		if (ret)
3246 			return ret;
3247 
3248 		/* We use last_read_req because sync_to()
3249 		 * might have just caused seqno wrap under
3250 		 * the radar.
3251 		 */
3252 		from->semaphore.sync_seqno[idx] =
3253 			i915_gem_request_get_seqno(obj->last_read_req[from->id]);
3254 	}
3255 
3256 	return 0;
3257 }
3258 
3259 /**
3260  * i915_gem_object_sync - sync an object to a ring.
3261  *
3262  * @obj: object which may be in use on another ring.
3263  * @to: ring we wish to use the object on. May be NULL.
3264  * @to_req: request we wish to use the object for. See below.
3265  *          This will be allocated and returned if a request is
3266  *          required but not passed in.
3267  *
3268  * This code is meant to abstract object synchronization with the GPU.
3269  * Calling with NULL implies synchronizing the object with the CPU
3270  * rather than a particular GPU ring. Conceptually we serialise writes
3271  * between engines inside the GPU. We only allow one engine to write
3272  * into a buffer at any time, but multiple readers. To ensure each has
3273  * a coherent view of memory, we must:
3274  *
3275  * - If there is an outstanding write request to the object, the new
3276  *   request must wait for it to complete (either CPU or in hw, requests
3277  *   on the same ring will be naturally ordered).
3278  *
3279  * - If we are a write request (pending_write_domain is set), the new
3280  *   request must wait for outstanding read requests to complete.
3281  *
3282  * For CPU synchronisation (NULL to) no request is required. For syncing with
3283  * rings to_req must be non-NULL. However, a request does not have to be
3284  * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
3285  * request will be allocated automatically and returned through *to_req. Note
3286  * that it is not guaranteed that commands will be emitted (because the system
3287  * might already be idle). Hence there is no need to create a request that
3288  * might never have any work submitted. Note further that if a request is
3289  * returned in *to_req, it is the responsibility of the caller to submit
3290  * that request (after potentially adding more work to it).
3291  *
3292  * Returns 0 if successful, else propagates up the lower layer error.
3293  */
3294 int
3295 i915_gem_object_sync(struct drm_i915_gem_object *obj,
3296 		     struct intel_engine_cs *to,
3297 		     struct drm_i915_gem_request **to_req)
3298 {
3299 	const bool readonly = obj->base.pending_write_domain == 0;
3300 	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
3301 	int ret, i, n;
3302 
3303 	if (!obj->active)
3304 		return 0;
3305 
3306 	if (to == NULL)
3307 		return i915_gem_object_wait_rendering(obj, readonly);
3308 
3309 	n = 0;
3310 	if (readonly) {
3311 		if (obj->last_write_req)
3312 			req[n++] = obj->last_write_req;
3313 	} else {
3314 		for (i = 0; i < I915_NUM_ENGINES; i++)
3315 			if (obj->last_read_req[i])
3316 				req[n++] = obj->last_read_req[i];
3317 	}
3318 	for (i = 0; i < n; i++) {
3319 		ret = __i915_gem_object_sync(obj, to, req[i], to_req);
3320 		if (ret)
3321 			return ret;
3322 	}
3323 
3324 	return 0;
3325 }
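/* Typical usage (illustrative sketch): callers such as execbuffer reservation
 * sync each object to the target engine before emitting commands, e.g.
 *
 *	ret = i915_gem_object_sync(obj, req->engine, &req);
 *	if (ret)
 *		return ret;
 *
 * where req may be allocated by the call if semaphore commands need to be
 * emitted.
 */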
3326 
3327 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
3328 {
3329 	u32 old_write_domain, old_read_domains;
3330 
3331 	/* Force a pagefault for domain tracking on next user access */
3332 	i915_gem_release_mmap(obj);
3333 
3334 	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3335 		return;
3336 
3337 	old_read_domains = obj->base.read_domains;
3338 	old_write_domain = obj->base.write_domain;
3339 
3340 	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
3341 	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
3342 
3343 	trace_i915_gem_object_change_domain(obj,
3344 					    old_read_domains,
3345 					    old_write_domain);
3346 }
3347 
3348 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3349 {
3350 	struct drm_i915_gem_object *obj = vma->obj;
3351 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3352 	int ret;
3353 
3354 	if (list_empty(&vma->obj_link))
3355 		return 0;
3356 
3357 	if (!drm_mm_node_allocated(&vma->node)) {
3358 		i915_gem_vma_destroy(vma);
3359 		return 0;
3360 	}
3361 
3362 	if (vma->pin_count)
3363 		return -EBUSY;
3364 
3365 	BUG_ON(obj->pages == NULL);
3366 
3367 	if (wait) {
3368 		ret = i915_gem_object_wait_rendering(obj, false);
3369 		if (ret)
3370 			return ret;
3371 	}
3372 
3373 	if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3374 		i915_gem_object_finish_gtt(obj);
3375 
3376 		/* release the fence reg _after_ flushing */
3377 		ret = i915_gem_object_put_fence(obj);
3378 		if (ret)
3379 			return ret;
3380 	}
3381 
3382 	trace_i915_vma_unbind(vma);
3383 
3384 	vma->vm->unbind_vma(vma);
3385 	vma->bound = 0;
3386 
3387 	list_del_init(&vma->vm_link);
3388 	if (vma->is_ggtt) {
3389 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3390 			obj->map_and_fenceable = false;
3391 		} else if (vma->ggtt_view.pages) {
3392 			sg_free_table(vma->ggtt_view.pages);
3393 			kfree(vma->ggtt_view.pages);
3394 		}
3395 		vma->ggtt_view.pages = NULL;
3396 	}
3397 
3398 	drm_mm_remove_node(&vma->node);
3399 	i915_gem_vma_destroy(vma);
3400 
3401 	/* Since the unbound list is global, only move to that list if
3402 	 * no more VMAs exist. */
3403 	if (list_empty(&obj->vma_list))
3404 		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3405 
3406 	/* And finally now the object is completely decoupled from this vma,
3407 	 * we can drop its hold on the backing storage and allow it to be
3408 	 * reaped by the shrinker.
3409 	 */
3410 	i915_gem_object_unpin_pages(obj);
3411 
3412 	return 0;
3413 }
3414 
3415 int i915_vma_unbind(struct i915_vma *vma)
3416 {
3417 	return __i915_vma_unbind(vma, true);
3418 }
3419 
3420 int __i915_vma_unbind_no_wait(struct i915_vma *vma)
3421 {
3422 	return __i915_vma_unbind(vma, false);
3423 }
3424 
3425 int i915_gpu_idle(struct drm_device *dev)
3426 {
3427 	struct drm_i915_private *dev_priv = dev->dev_private;
3428 	struct intel_engine_cs *engine;
3429 	int ret;
3430 
3431 	/* Flush everything onto the inactive list. */
3432 	for_each_engine(engine, dev_priv) {
3433 		if (!i915.enable_execlists) {
3434 			struct drm_i915_gem_request *req;
3435 
3436 			req = i915_gem_request_alloc(engine, NULL);
3437 			if (IS_ERR(req))
3438 				return PTR_ERR(req);
3439 
3440 			ret = i915_switch_context(req);
3441 			i915_add_request_no_flush(req);
3442 			if (ret)
3443 				return ret;
3444 		}
3445 
3446 		ret = intel_engine_idle(engine);
3447 		if (ret)
3448 			return ret;
3449 	}
3450 
3451 	WARN_ON(i915_verify_lists(dev));
3452 	return 0;
3453 }
3454 
3455 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3456 				     unsigned long cache_level)
3457 {
3458 	struct drm_mm_node *gtt_space = &vma->node;
3459 	struct drm_mm_node *other;
3460 
3461 	/*
3462 	 * On some machines we have to be careful when putting differing types
3463 	 * of snoopable memory together to avoid the prefetcher crossing memory
3464 	 * domains and dying. During vm initialisation, we decide whether or not
3465 	 * these constraints apply and set the drm_mm.color_adjust
3466 	 * appropriately.
3467 	 */
3468 	if (vma->vm->mm.color_adjust == NULL)
3469 		return true;
3470 
3471 	if (!drm_mm_node_allocated(gtt_space))
3472 		return true;
3473 
3474 	if (list_empty(&gtt_space->node_list))
3475 		return true;
3476 
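	/* Only the immediate neighbours matter: a differing cache level is
	 * acceptable as long as a guard hole separates the two nodes.
	 */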
3477 	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3478 	if (other->allocated && !other->hole_follows && other->color != cache_level)
3479 		return false;
3480 
3481 	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3482 	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3483 		return false;
3484 
3485 	return true;
3486 }
3487 
3488 /**
3489  * Finds free space in the GTT aperture and binds the object or a view of it
3490  * there.
3491  */
3492 static struct i915_vma *
3493 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3494 			   struct i915_address_space *vm,
3495 			   const struct i915_ggtt_view *ggtt_view,
3496 			   unsigned alignment,
3497 			   uint64_t flags)
3498 {
3499 	struct drm_device *dev = obj->base.dev;
3500 	struct drm_i915_private *dev_priv = to_i915(dev);
3501 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
3502 	u32 fence_alignment, unfenced_alignment;
3503 	u32 search_flag, alloc_flag;
3504 	u64 start, end;
3505 	u64 size, fence_size;
3506 	struct i915_vma *vma;
3507 	int ret;
3508 
3509 	if (i915_is_ggtt(vm)) {
3510 		u32 view_size;
3511 
3512 		if (WARN_ON(!ggtt_view))
3513 			return ERR_PTR(-EINVAL);
3514 
3515 		view_size = i915_ggtt_view_size(obj, ggtt_view);
3516 
3517 		fence_size = i915_gem_get_gtt_size(dev,
3518 						   view_size,
3519 						   obj->tiling_mode);
3520 		fence_alignment = i915_gem_get_gtt_alignment(dev,
3521 							     view_size,
3522 							     obj->tiling_mode,
3523 							     true);
3524 		unfenced_alignment = i915_gem_get_gtt_alignment(dev,
3525 								view_size,
3526 								obj->tiling_mode,
3527 								false);
3528 		size = flags & PIN_MAPPABLE ? fence_size : view_size;
3529 	} else {
3530 		fence_size = i915_gem_get_gtt_size(dev,
3531 						   obj->base.size,
3532 						   obj->tiling_mode);
3533 		fence_alignment = i915_gem_get_gtt_alignment(dev,
3534 							     obj->base.size,
3535 							     obj->tiling_mode,
3536 							     true);
3537 		unfenced_alignment =
3538 			i915_gem_get_gtt_alignment(dev,
3539 						   obj->base.size,
3540 						   obj->tiling_mode,
3541 						   false);
3542 		size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3543 	}
3544 
3545 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3546 	end = vm->total;
3547 	if (flags & PIN_MAPPABLE)
3548 		end = min_t(u64, end, ggtt->mappable_end);
3549 	if (flags & PIN_ZONE_4G)
3550 		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
3551 
3552 	if (alignment == 0)
3553 		alignment = flags & PIN_MAPPABLE ? fence_alignment :
3554 						unfenced_alignment;
3555 	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3556 		DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
3557 			  ggtt_view ? ggtt_view->type : 0,
3558 			  alignment);
3559 		return ERR_PTR(-EINVAL);
3560 	}
3561 
3562 	/* If binding the object/GGTT view requires more space than the entire
3563 	 * aperture has, reject it early before evicting everything in a vain
3564 	 * attempt to find space.
3565 	 */
3566 	if (size > end) {
3567 		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
3568 			  ggtt_view ? ggtt_view->type : 0,
3569 			  size,
3570 			  flags & PIN_MAPPABLE ? "mappable" : "total",
3571 			  end);
3572 		return ERR_PTR(-E2BIG);
3573 	}
3574 
3575 	ret = i915_gem_object_get_pages(obj);
3576 	if (ret)
3577 		return ERR_PTR(ret);
3578 
3579 	i915_gem_object_pin_pages(obj);
3580 
3581 	vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
3582 			  i915_gem_obj_lookup_or_create_vma(obj, vm);
3583 
3584 	if (IS_ERR(vma))
3585 		goto err_unpin;
3586 
3587 	if (flags & PIN_OFFSET_FIXED) {
3588 		uint64_t offset = flags & PIN_OFFSET_MASK;
3589 
3590 		if (offset & (alignment - 1) || offset + size > end) {
3591 			ret = -EINVAL;
3592 			goto err_free_vma;
3593 		}
3594 		vma->node.start = offset;
3595 		vma->node.size = size;
3596 		vma->node.color = obj->cache_level;
3597 		ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3598 		if (ret) {
3599 			ret = i915_gem_evict_for_vma(vma);
3600 			if (ret == 0)
3601 				ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3602 		}
3603 		if (ret)
3604 			goto err_free_vma;
3605 	} else {
3606 		if (flags & PIN_HIGH) {
3607 			search_flag = DRM_MM_SEARCH_BELOW;
3608 			alloc_flag = DRM_MM_CREATE_TOP;
3609 		} else {
3610 			search_flag = DRM_MM_SEARCH_DEFAULT;
3611 			alloc_flag = DRM_MM_CREATE_DEFAULT;
3612 		}
3613 
3614 search_free:
3615 		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3616 							  size, alignment,
3617 							  obj->cache_level,
3618 							  start, end,
3619 							  search_flag,
3620 							  alloc_flag);
3621 		if (ret) {
3622 			ret = i915_gem_evict_something(dev, vm, size, alignment,
3623 						       obj->cache_level,
3624 						       start, end,
3625 						       flags);
3626 			if (ret == 0)
3627 				goto search_free;
3628 
3629 			goto err_free_vma;
3630 		}
3631 	}
3632 	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3633 		ret = -EINVAL;
3634 		goto err_remove_node;
3635 	}
3636 
3637 	trace_i915_vma_bind(vma, flags);
3638 	ret = i915_vma_bind(vma, obj->cache_level, flags);
3639 	if (ret)
3640 		goto err_remove_node;
3641 
3642 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3643 	list_add_tail(&vma->vm_link, &vm->inactive_list);
3644 
3645 	return vma;
3646 
3647 err_remove_node:
3648 	drm_mm_remove_node(&vma->node);
3649 err_free_vma:
3650 	i915_gem_vma_destroy(vma);
3651 	vma = ERR_PTR(ret);
3652 err_unpin:
3653 	i915_gem_object_unpin_pages(obj);
3654 	return vma;
3655 }
3656 
3657 bool
3658 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3659 			bool force)
3660 {
3661 	/* If we don't have a page list set up, then we're not pinned
3662 	 * to GPU, and we can ignore the cache flush because it'll happen
3663 	 * again at bind time.
3664 	 */
3665 	if (obj->pages == NULL)
3666 		return false;
3667 
3668 	/*
3669 	 * Stolen memory is always coherent with the GPU as it is explicitly
3670 	 * marked as wc by the system, or the system is cache-coherent.
3671 	 */
3672 	if (obj->stolen || obj->phys_handle)
3673 		return false;
3674 
3675 	/* If the GPU is snooping the contents of the CPU cache,
3676 	 * we do not need to manually clear the CPU cache lines.  However,
3677 	 * the caches are only snooped when the render cache is
3678 	 * flushed/invalidated.  As we always have to emit invalidations
3679 	 * and flushes when moving into and out of the RENDER domain, correct
3680 	 * snooping behaviour occurs naturally as the result of our domain
3681 	 * tracking.
3682 	 */
3683 	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3684 		obj->cache_dirty = true;
3685 		return false;
3686 	}
3687 
3688 	trace_i915_gem_object_clflush(obj);
3689 	drm_clflush_sg(obj->pages);
3690 	obj->cache_dirty = false;
3691 
3692 	return true;
3693 }
3694 
3695 /** Flushes the GTT write domain for the object if it's dirty. */
3696 static void
3697 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3698 {
3699 	uint32_t old_write_domain;
3700 
3701 	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3702 		return;
3703 
3704 	/* No actual flushing is required for the GTT write domain.  Writes
3705 	 * to it immediately go to main memory as far as we know, so there's
3706 	 * no chipset flush.  It also doesn't land in render cache.
3707 	 *
3708 	 * However, we do have to enforce the order so that all writes through
3709 	 * the GTT land before any writes to the device, such as updates to
3710 	 * the GATT itself.
3711 	 */
3712 	wmb();
3713 
3714 	old_write_domain = obj->base.write_domain;
3715 	obj->base.write_domain = 0;
3716 
3717 	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
3718 
3719 	trace_i915_gem_object_change_domain(obj,
3720 					    obj->base.read_domains,
3721 					    old_write_domain);
3722 }
3723 
3724 /** Flushes the CPU write domain for the object if it's dirty. */
3725 static void
3726 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3727 {
3728 	uint32_t old_write_domain;
3729 
3730 	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3731 		return;
3732 
3733 	if (i915_gem_clflush_object(obj, obj->pin_display))
3734 		i915_gem_chipset_flush(obj->base.dev);
3735 
3736 	old_write_domain = obj->base.write_domain;
3737 	obj->base.write_domain = 0;
3738 
3739 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3740 
3741 	trace_i915_gem_object_change_domain(obj,
3742 					    obj->base.read_domains,
3743 					    old_write_domain);
3744 }
3745 
3746 /**
3747  * Moves a single object to the GTT read, and possibly write domain.
3748  *
3749  * This function returns when the move is complete, including waiting on
3750  * flushes to occur.
3751  */
3752 int
3753 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3754 {
3755 	struct drm_device *dev = obj->base.dev;
3756 	struct drm_i915_private *dev_priv = to_i915(dev);
3757 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
3758 	uint32_t old_write_domain, old_read_domains;
3759 	struct i915_vma *vma;
3760 	int ret;
3761 
3762 	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3763 		return 0;
3764 
3765 	ret = i915_gem_object_wait_rendering(obj, !write);
3766 	if (ret)
3767 		return ret;
3768 
3769 	/* Flush and acquire obj->pages so that we are coherent through
3770 	 * direct access in memory with previous cached writes through
3771 	 * shmemfs and that our cache domain tracking remains valid.
3772 	 * For example, if the obj->filp was moved to swap without us
3773 	 * being notified and releasing the pages, we would mistakenly
3774 	 * continue to assume that the obj remained out of the CPU cached
3775 	 * domain.
3776 	 */
3777 	ret = i915_gem_object_get_pages(obj);
3778 	if (ret)
3779 		return ret;
3780 
3781 	i915_gem_object_flush_cpu_write_domain(obj);
3782 
3783 	/* Serialise direct access to this object with the barriers for
3784 	 * coherent writes from the GPU, by effectively invalidating the
3785 	 * GTT domain upon first access.
3786 	 */
3787 	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3788 		mb();
3789 
3790 	old_write_domain = obj->base.write_domain;
3791 	old_read_domains = obj->base.read_domains;
3792 
3793 	/* It should now be out of any other write domains, and we can update
3794 	 * the domain values for our changes.
3795 	 */
3796 	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3797 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3798 	if (write) {
3799 		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3800 		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3801 		obj->dirty = 1;
3802 	}
3803 
3804 	trace_i915_gem_object_change_domain(obj,
3805 					    old_read_domains,
3806 					    old_write_domain);
3807 
3808 	/* And bump the LRU for this access */
3809 	vma = i915_gem_obj_to_ggtt(obj);
3810 	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
3811 		list_move_tail(&vma->vm_link,
3812 			       &ggtt->base.inactive_list);
3813 
3814 	return 0;
3815 }
3816 
3817 /**
3818  * Changes the cache-level of an object across all VMA.
3819  *
3820  * After this function returns, the object will be in the new cache-level
3821  * across all GTT and the contents of the backing storage will be coherent,
3822  * with respect to the new cache-level. In order to keep the backing storage
3823  * coherent for all users, we only allow a single cache level to be set
3824  * globally on the object and prevent it from being changed whilst the
3825  * hardware is reading from the object. That is if the object is currently
3826  * hardware is reading from the object. That is, if the object is currently
3827  * on the scanout, it will be set to uncached (or equivalent display
3828  * that all direct access to the scanout remains coherent.
3829  */
3830 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3831 				    enum i915_cache_level cache_level)
3832 {
3833 	struct drm_device *dev = obj->base.dev;
3834 	struct i915_vma *vma, *next;
3835 	bool bound = false;
3836 	int ret = 0;
3837 
3838 	if (obj->cache_level == cache_level)
3839 		goto out;
3840 
3841 	/* Inspect the list of currently bound VMA and unbind any that would
3842 	 * be invalid given the new cache-level. This is principally to
3843 	 * catch the issue of the CS prefetch crossing page boundaries and
3844 	 * reading an invalid PTE on older architectures.
3845 	 */
3846 	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
3847 		if (!drm_mm_node_allocated(&vma->node))
3848 			continue;
3849 
3850 		if (vma->pin_count) {
3851 			DRM_DEBUG("can not change the cache level of pinned objects\n");
3852 			return -EBUSY;
3853 		}
3854 
3855 		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
3856 			ret = i915_vma_unbind(vma);
3857 			if (ret)
3858 				return ret;
3859 		} else
3860 			bound = true;
3861 	}
3862 
3863 	/* We can reuse the existing drm_mm nodes but need to change the
3864 	 * cache-level on the PTE. We could simply unbind them all and
3865 	 * rebind with the correct cache-level on next use. However since
3866 	 * we already have a valid slot, dma mapping, pages etc, we may as
3867 	 * well rewrite the PTE in the belief that doing so tramples upon less
3868 	 * state and so involves less work.
3869 	 */
3870 	if (bound) {
3871 		/* Before we change the PTE, the GPU must not be accessing it.
3872 		 * If we wait upon the object, we know that all the bound
3873 		 * VMA are no longer active.
3874 		 */
3875 		ret = i915_gem_object_wait_rendering(obj, false);
3876 		if (ret)
3877 			return ret;
3878 
3879 		if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
3880 			/* Access to snoopable pages through the GTT is
3881 			 * incoherent and on some machines causes a hard
3882 			 * lockup. Relinquish the CPU mmaping to force
3883 			 * lockup. Relinquish the CPU mmapping to force
3884 			 * then double check if the GTT mapping is still
3885 			 * valid for that pointer access.
3886 			 */
3887 			i915_gem_release_mmap(obj);
3888 
3889 			/* As we no longer need a fence for GTT access,
3890 			 * we can relinquish it now (and so prevent having
3891 			 * to steal a fence from someone else on the next
3892 			 * fence request). Note GPU activity would have
3893 			 * dropped the fence as all snoopable access is
3894 			 * supposed to be linear.
3895 			 */
3896 			ret = i915_gem_object_put_fence(obj);
3897 			if (ret)
3898 				return ret;
3899 		} else {
3900 			/* We either have incoherent backing store and
3901 			 * so no GTT access or the architecture is fully
3902 			 * coherent. In such cases, existing GTT mmaps
3903 			 * ignore the cache bit in the PTE and we can
3904 			 * rewrite it without confusing the GPU or having
3905 			 * to force userspace to fault back in its mmaps.
3906 			 */
3907 		}
3908 
3909 		list_for_each_entry(vma, &obj->vma_list, obj_link) {
3910 			if (!drm_mm_node_allocated(&vma->node))
3911 				continue;
3912 
3913 			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3914 			if (ret)
3915 				return ret;
3916 		}
3917 	}
3918 
3919 	list_for_each_entry(vma, &obj->vma_list, obj_link)
3920 		vma->node.color = cache_level;
3921 	obj->cache_level = cache_level;
3922 
3923 out:
3924 	/* Flush the dirty CPU caches to the backing storage so that the
3925 	 * object is now coherent at its new cache level (with respect
3926 	 * to the access domain).
3927 	 */
3928 	if (obj->cache_dirty &&
3929 	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
3930 	    cpu_write_needs_clflush(obj)) {
3931 		if (i915_gem_clflush_object(obj, true))
3932 			i915_gem_chipset_flush(obj->base.dev);
3933 	}
3934 
3935 	return 0;
3936 }
3937 
3938 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3939 			       struct drm_file *file)
3940 {
3941 	struct drm_i915_gem_caching *args = data;
3942 	struct drm_i915_gem_object *obj;
3943 
3944 	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
3945 	if (&obj->base == NULL)
3946 		return -ENOENT;
3947 
3948 	switch (obj->cache_level) {
3949 	case I915_CACHE_LLC:
3950 	case I915_CACHE_L3_LLC:
3951 		args->caching = I915_CACHING_CACHED;
3952 		break;
3953 
3954 	case I915_CACHE_WT:
3955 		args->caching = I915_CACHING_DISPLAY;
3956 		break;
3957 
3958 	default:
3959 		args->caching = I915_CACHING_NONE;
3960 		break;
3961 	}
3962 
3963 	drm_gem_object_unreference_unlocked(&obj->base);
3964 	return 0;
3965 }
3966 
3967 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3968 			       struct drm_file *file)
3969 {
3970 	struct drm_i915_private *dev_priv = dev->dev_private;
3971 	struct drm_i915_gem_caching *args = data;
3972 	struct drm_i915_gem_object *obj;
3973 	enum i915_cache_level level;
3974 	int ret;
3975 
3976 	switch (args->caching) {
3977 	case I915_CACHING_NONE:
3978 		level = I915_CACHE_NONE;
3979 		break;
3980 	case I915_CACHING_CACHED:
3981 		/*
3982 		 * Due to a HW issue on BXT A stepping, GPU stores via a
3983 		 * snooped mapping may leave stale data in a corresponding CPU
3984 		 * cacheline, whereas normally such cachelines would get
3985 		 * invalidated.
3986 		 */
3987 		if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
3988 			return -ENODEV;
3989 
3990 		level = I915_CACHE_LLC;
3991 		break;
3992 	case I915_CACHING_DISPLAY:
3993 		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3994 		break;
3995 	default:
3996 		return -EINVAL;
3997 	}
3998 
3999 	intel_runtime_pm_get(dev_priv);
4000 
4001 	ret = i915_mutex_lock_interruptible(dev);
4002 	if (ret)
4003 		goto rpm_put;
4004 
4005 	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
4006 	if (&obj->base == NULL) {
4007 		ret = -ENOENT;
4008 		goto unlock;
4009 	}
4010 
4011 	ret = i915_gem_object_set_cache_level(obj, level);
4012 
4013 	drm_gem_object_unreference(&obj->base);
4014 unlock:
4015 	mutex_unlock(&dev->struct_mutex);
4016 rpm_put:
4017 	intel_runtime_pm_put(dev_priv);
4018 
4019 	return ret;
4020 }
4021 
4022 /*
4023  * Prepare buffer for display plane (scanout, cursors, etc).
4024  * Can be called from an uninterruptible phase (modesetting) and allows
4025  * any flushes to be pipelined (for pageflips).
4026  */
4027 int
4028 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
4029 				     u32 alignment,
4030 				     const struct i915_ggtt_view *view)
4031 {
4032 	u32 old_read_domains, old_write_domain;
4033 	int ret;
4034 
4035 	/* Mark the pin_display early so that we account for the
4036 	 * display coherency whilst setting up the cache domains.
4037 	 */
4038 	obj->pin_display++;
4039 
4040 	/* The display engine is not coherent with the LLC cache on gen6.  As
4041 	 * a result, we make sure that the pinning that is about to occur is
4042 	 * done with uncached PTEs. This is the lowest common denominator for all
4043 	 * chipsets.
4044 	 *
4045 	 * However for gen6+, we could do better by using the GFDT bit instead
4046 	 * of uncaching, which would allow us to flush all the LLC-cached data
4047 	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
4048 	 */
4049 	ret = i915_gem_object_set_cache_level(obj,
4050 					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
4051 	if (ret)
4052 		goto err_unpin_display;
4053 
4054 	/* As the user may map the buffer once pinned in the display plane
4055 	 * (e.g. libkms for the bootup splash), we have to ensure that we
4056 	 * always use map_and_fenceable for all scanout buffers.
4057 	 */
4058 	ret = i915_gem_object_ggtt_pin(obj, view, alignment,
4059 				       view->type == I915_GGTT_VIEW_NORMAL ?
4060 				       PIN_MAPPABLE : 0);
4061 	if (ret)
4062 		goto err_unpin_display;
4063 
4064 	i915_gem_object_flush_cpu_write_domain(obj);
4065 
4066 	old_write_domain = obj->base.write_domain;
4067 	old_read_domains = obj->base.read_domains;
4068 
4069 	/* It should now be out of any other write domains, and we can update
4070 	 * the domain values for our changes.
4071 	 */
4072 	obj->base.write_domain = 0;
4073 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
4074 
4075 	trace_i915_gem_object_change_domain(obj,
4076 					    old_read_domains,
4077 					    old_write_domain);
4078 
4079 	return 0;
4080 
4081 err_unpin_display:
4082 	obj->pin_display--;
4083 	return ret;
4084 }
4085 
4086 void
4087 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
4088 					 const struct i915_ggtt_view *view)
4089 {
4090 	if (WARN_ON(obj->pin_display == 0))
4091 		return;
4092 
4093 	i915_gem_object_ggtt_unpin_view(obj, view);
4094 
4095 	obj->pin_display--;
4096 }
4097 
4098 /**
4099  * Moves a single object to the CPU read, and possibly write domain.
4100  *
4101  * This function returns when the move is complete, including waiting on
4102  * flushes to occur.
4103  */
4104 int
4105 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4106 {
4107 	uint32_t old_write_domain, old_read_domains;
4108 	int ret;
4109 
4110 	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
4111 		return 0;
4112 
4113 	ret = i915_gem_object_wait_rendering(obj, !write);
4114 	if (ret)
4115 		return ret;
4116 
4117 	i915_gem_object_flush_gtt_write_domain(obj);
4118 
4119 	old_write_domain = obj->base.write_domain;
4120 	old_read_domains = obj->base.read_domains;
4121 
4122 	/* Flush the CPU cache if it's still invalid. */
4123 	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4124 		i915_gem_clflush_object(obj, false);
4125 
4126 		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
4127 	}
4128 
4129 	/* It should now be out of any other write domains, and we can update
4130 	 * the domain values for our changes.
4131 	 */
4132 	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
4133 
4134 	/* If we're writing through the CPU, then the GPU read domains will
4135 	 * need to be invalidated at next use.
4136 	 */
4137 	if (write) {
4138 		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4139 		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4140 	}
4141 
4142 	trace_i915_gem_object_change_domain(obj,
4143 					    old_read_domains,
4144 					    old_write_domain);
4145 
4146 	return 0;
4147 }
4148 
4149 /* Throttle our rendering by waiting until the ring has completed our requests
4150  * emitted over 20 msec ago.
4151  *
4152  * Note that if we were to use the current jiffies each time around the loop,
4153  * we wouldn't escape the function with any frames outstanding if the time to
4154  * render a frame was over 20ms.
4155  *
4156  * This should get us reasonable parallelism between CPU and GPU but also
4157  * relatively low latency when blocking on a particular request to finish.
4158  */
4159 static int
4160 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4161 {
4162 	struct drm_i915_private *dev_priv = dev->dev_private;
4163 	struct drm_i915_file_private *file_priv = file->driver_priv;
4164 	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
4165 	struct drm_i915_gem_request *request, *target = NULL;
4166 	int ret;
4167 
4168 	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4169 	if (ret)
4170 		return ret;
4171 
4172 	/* ABI: return -EIO if already wedged */
4173 	if (i915_terminally_wedged(&dev_priv->gpu_error))
4174 		return -EIO;
4175 
4176 	spin_lock(&file_priv->mm.lock);
4177 	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4178 		if (time_after_eq(request->emitted_jiffies, recent_enough))
4179 			break;
4180 
4181 		/*
4182 		 * Note that the request might not have been submitted yet,
4183 		 * in which case emitted_jiffies will be zero.
4184 		 */
4185 		if (!request->emitted_jiffies)
4186 			continue;
4187 
4188 		target = request;
4189 	}
4190 	if (target)
4191 		i915_gem_request_reference(target);
4192 	spin_unlock(&file_priv->mm.lock);
4193 
4194 	if (target == NULL)
4195 		return 0;
4196 
4197 	ret = __i915_wait_request(target, true, NULL, NULL);
4198 	if (ret == 0)
4199 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4200 
4201 	i915_gem_request_unreference__unlocked(target);
4202 
4203 	return ret;
4204 }
4205 
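/* Report whether an existing binding violates the caller's placement
 * constraints: alignment, mappability, a minimum offset (PIN_OFFSET_BIAS)
 * or an exact offset (PIN_OFFSET_FIXED).
 */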
4206 static bool
4207 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4208 {
4209 	struct drm_i915_gem_object *obj = vma->obj;
4210 
4211 	if (alignment &&
4212 	    vma->node.start & (alignment - 1))
4213 		return true;
4214 
4215 	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4216 		return true;
4217 
4218 	if (flags & PIN_OFFSET_BIAS &&
4219 	    vma->node.start < (flags & PIN_OFFSET_MASK))
4220 		return true;
4221 
4222 	if (flags & PIN_OFFSET_FIXED &&
4223 	    vma->node.start != (flags & PIN_OFFSET_MASK))
4224 		return true;
4225 
4226 	return false;
4227 }
4228 
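/* Recompute obj->map_and_fenceable: the vma must be exactly fence-sized,
 * fence-aligned and lie wholly within the mappable aperture.
 */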
4229 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
4230 {
4231 	struct drm_i915_gem_object *obj = vma->obj;
4232 	bool mappable, fenceable;
4233 	u32 fence_size, fence_alignment;
4234 
4235 	fence_size = i915_gem_get_gtt_size(obj->base.dev,
4236 					   obj->base.size,
4237 					   obj->tiling_mode);
4238 	fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4239 						     obj->base.size,
4240 						     obj->tiling_mode,
4241 						     true);
4242 
4243 	fenceable = (vma->node.size == fence_size &&
4244 		     (vma->node.start & (fence_alignment - 1)) == 0);
4245 
4246 	mappable = (vma->node.start + fence_size <=
4247 		    to_i915(obj->base.dev)->ggtt.mappable_end);
4248 
4249 	obj->map_and_fenceable = mappable && fenceable;
4250 }
4251 
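/* Common pinning path: reuse an existing vma when it already satisfies
 * the requested placement, unbind and rebind it when it does not, and
 * finally take a pin reference on the vma.
 */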
4252 static int
4253 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4254 		       struct i915_address_space *vm,
4255 		       const struct i915_ggtt_view *ggtt_view,
4256 		       uint32_t alignment,
4257 		       uint64_t flags)
4258 {
4259 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4260 	struct i915_vma *vma;
4261 	unsigned bound;
4262 	int ret;
4263 
4264 	if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4265 		return -ENODEV;
4266 
4267 	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
4268 		return -EINVAL;
4269 
4270 	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
4271 		return -EINVAL;
4272 
4273 	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
4274 		return -EINVAL;
4275 
4276 	vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
4277 			  i915_gem_obj_to_vma(obj, vm);
4278 
4279 	if (vma) {
4280 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4281 			return -EBUSY;
4282 
4283 		if (i915_vma_misplaced(vma, alignment, flags)) {
4284 			WARN(vma->pin_count,
4285 			     "bo is already pinned in %s with incorrect alignment:"
4286 			     " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
4287 			     " obj->map_and_fenceable=%d\n",
4288 			     ggtt_view ? "ggtt" : "ppgtt",
4289 			     upper_32_bits(vma->node.start),
4290 			     lower_32_bits(vma->node.start),
4291 			     alignment,
4292 			     !!(flags & PIN_MAPPABLE),
4293 			     obj->map_and_fenceable);
4294 			ret = i915_vma_unbind(vma);
4295 			if (ret)
4296 				return ret;
4297 
4298 			vma = NULL;
4299 		}
4300 	}
4301 
4302 	bound = vma ? vma->bound : 0;
4303 	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
4304 		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
4305 						 flags);
4306 		if (IS_ERR(vma))
4307 			return PTR_ERR(vma);
4308 	} else {
4309 		ret = i915_vma_bind(vma, obj->cache_level, flags);
4310 		if (ret)
4311 			return ret;
4312 	}
4313 
4314 	if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
4315 	    (bound ^ vma->bound) & GLOBAL_BIND) {
4316 		__i915_vma_set_map_and_fenceable(vma);
4317 		WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4318 	}
4319 
4320 	vma->pin_count++;
4321 	return 0;
4322 }
4323 
4324 int
4325 i915_gem_object_pin(struct drm_i915_gem_object *obj,
4326 		    struct i915_address_space *vm,
4327 		    uint32_t alignment,
4328 		    uint64_t flags)
4329 {
4330 	return i915_gem_object_do_pin(obj, vm,
4331 				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
4332 				      alignment, flags);
4333 }
4334 
4335 int
4336 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4337 			 const struct i915_ggtt_view *view,
4338 			 uint32_t alignment,
4339 			 uint64_t flags)
4340 {
4341 	struct drm_device *dev = obj->base.dev;
4342 	struct drm_i915_private *dev_priv = to_i915(dev);
4343 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
4344 
4345 	BUG_ON(!view);
4346 
4347 	return i915_gem_object_do_pin(obj, &ggtt->base, view,
4348 				      alignment, flags | PIN_GLOBAL);
4349 }
4350 
4351 void
4352 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
4353 				const struct i915_ggtt_view *view)
4354 {
4355 	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
4356 
4357 	WARN_ON(vma->pin_count == 0);
4358 	WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
4359 
4360 	--vma->pin_count;
4361 }
4362 
4363 int
4364 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4365 		    struct drm_file *file)
4366 {
4367 	struct drm_i915_gem_busy *args = data;
4368 	struct drm_i915_gem_object *obj;
4369 	int ret;
4370 
4371 	ret = i915_mutex_lock_interruptible(dev);
4372 	if (ret)
4373 		return ret;
4374 
4375 	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
4376 	if (&obj->base == NULL) {
4377 		ret = -ENOENT;
4378 		goto unlock;
4379 	}
4380 
4381 	/* Count all active objects as busy, even if they are currently not used
4382 	 * by the gpu. Users of this interface expect objects to eventually
4383 	 * become non-busy without any further actions, therefore emit any
4384 	 * necessary flushes here.
4385 	 */
4386 	ret = i915_gem_object_flush_active(obj);
4387 	if (ret)
4388 		goto unref;
4389 
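	/* Encode the result: each active reader sets bit (16 + exec_id),
	 * while the last writer's exec_id occupies the low bits.
	 */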
4390 	args->busy = 0;
4391 	if (obj->active) {
4392 		int i;
4393 
4394 		for (i = 0; i < I915_NUM_ENGINES; i++) {
4395 			struct drm_i915_gem_request *req;
4396 
4397 			req = obj->last_read_req[i];
4398 			if (req)
4399 				args->busy |= 1 << (16 + req->engine->exec_id);
4400 		}
4401 		if (obj->last_write_req)
4402 			args->busy |= obj->last_write_req->engine->exec_id;
4403 	}
4404 
4405 unref:
4406 	drm_gem_object_unreference(&obj->base);
4407 unlock:
4408 	mutex_unlock(&dev->struct_mutex);
4409 	return ret;
4410 }
4411 
4412 int
4413 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4414 			struct drm_file *file_priv)
4415 {
4416 	return i915_gem_ring_throttle(dev, file_priv);
4417 }
4418 
4419 int
4420 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4421 		       struct drm_file *file_priv)
4422 {
4423 	struct drm_i915_private *dev_priv = dev->dev_private;
4424 	struct drm_i915_gem_madvise *args = data;
4425 	struct drm_i915_gem_object *obj;
4426 	int ret;
4427 
4428 	switch (args->madv) {
4429 	case I915_MADV_DONTNEED:
4430 	case I915_MADV_WILLNEED:
4431 	    break;
4432 	default:
4433 	    return -EINVAL;
4434 	}
4435 
4436 	ret = i915_mutex_lock_interruptible(dev);
4437 	if (ret)
4438 		return ret;
4439 
4440 	obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
4441 	if (&obj->base == NULL) {
4442 		ret = -ENOENT;
4443 		goto unlock;
4444 	}
4445 
4446 	if (i915_gem_obj_is_pinned(obj)) {
4447 		ret = -EINVAL;
4448 		goto out;
4449 	}
4450 
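	/* On platforms with the swizzled-page quirk, tiled objects keep
	 * their pages pinned while marked WILLNEED; adjust the pin count
	 * to match the new madvise state.
	 */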
4451 	if (obj->pages &&
4452 	    obj->tiling_mode != I915_TILING_NONE &&
4453 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4454 		if (obj->madv == I915_MADV_WILLNEED)
4455 			i915_gem_object_unpin_pages(obj);
4456 		if (args->madv == I915_MADV_WILLNEED)
4457 			i915_gem_object_pin_pages(obj);
4458 	}
4459 
4460 	if (obj->madv != __I915_MADV_PURGED)
4461 		obj->madv = args->madv;
4462 
4463 	/* if the object is no longer attached, discard its backing storage */
4464 	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
4465 		i915_gem_object_truncate(obj);
4466 
4467 	args->retained = obj->madv != __I915_MADV_PURGED;
4468 
4469 out:
4470 	drm_gem_object_unreference(&obj->base);
4471 unlock:
4472 	mutex_unlock(&dev->struct_mutex);
4473 	return ret;
4474 }
4475 
4476 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4477 			  const struct drm_i915_gem_object_ops *ops)
4478 {
4479 	int i;
4480 
4481 	INIT_LIST_HEAD(&obj->global_list);
4482 	for (i = 0; i < I915_NUM_ENGINES; i++)
4483 		INIT_LIST_HEAD(&obj->engine_list[i]);
4484 	INIT_LIST_HEAD(&obj->obj_exec_link);
4485 	INIT_LIST_HEAD(&obj->vma_list);
4486 	INIT_LIST_HEAD(&obj->batch_pool_link);
4487 
4488 	obj->ops = ops;
4489 
4490 	obj->fence_reg = I915_FENCE_REG_NONE;
4491 	obj->madv = I915_MADV_WILLNEED;
4492 
4493 	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4494 }
4495 
4496 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4497 	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
4498 	.get_pages = i915_gem_object_get_pages_gtt,
4499 	.put_pages = i915_gem_object_put_pages_gtt,
4500 };
4501 
4502 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4503 						  size_t size)
4504 {
4505 	struct drm_i915_gem_object *obj;
4506 	struct address_space *mapping;
4507 	gfp_t mask;
4508 
4509 	obj = i915_gem_object_alloc(dev);
4510 	if (obj == NULL)
4511 		return NULL;
4512 
4513 	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4514 		i915_gem_object_free(obj);
4515 		return NULL;
4516 	}
4517 
4518 	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4519 	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4520 		/* 965gm cannot relocate objects above 4GiB. */
4521 		mask &= ~__GFP_HIGHMEM;
4522 		mask |= __GFP_DMA32;
4523 	}
4524 
4525 	mapping = file_inode(obj->base.filp)->i_mapping;
4526 	mapping_set_gfp_mask(mapping, mask);
4527 
4528 	i915_gem_object_init(obj, &i915_gem_object_ops);
4529 
4530 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4531 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4532 
4533 	if (HAS_LLC(dev)) {
4534 		/* On some devices, we can have the GPU use the LLC (the CPU
4535 		 * cache) for about a 10% performance improvement
4536 		 * compared to uncached.  Graphics requests other than
4537 		 * display scanout are coherent with the CPU in
4538 		 * accessing this cache.  This means in this mode we
4539 		 * don't need to clflush on the CPU side, and on the
4540 		 * GPU side we only need to flush internal caches to
4541 		 * get data visible to the CPU.
4542 		 *
4543 		 * However, we maintain the display planes as UC, and so
4544 		 * need to rebind when first used as such.
4545 		 */
4546 		obj->cache_level = I915_CACHE_LLC;
4547 	} else
4548 		obj->cache_level = I915_CACHE_NONE;
4549 
4550 	trace_i915_gem_object_create(obj);
4551 
4552 	return obj;
4553 }
4554 
4555 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4556 {
4557 	/* If we are the last user of the backing storage (be it shmemfs
4558 	 * pages or stolen etc), we know that the pages are going to be
4559 	 * immediately released. In this case, we can then skip copying
4560 	 * back the contents from the GPU.
4561 	 */
4562 
4563 	if (obj->madv != I915_MADV_WILLNEED)
4564 		return false;
4565 
4566 	if (obj->base.filp == NULL)
4567 		return true;
4568 
4569 	/* At first glance, this looks racy, but then again so would be
4570 	 * userspace racing mmap against close. However, the first external
4571 	 * reference to the filp can only be obtained through the
4572 	 * i915_gem_mmap_ioctl() which safeguards us against the user
4573 	 * acquiring such a reference whilst we are in the middle of
4574 	 * freeing the object.
4575 	 */
4576 	return atomic_long_read(&obj->base.filp->f_count) == 1;
4577 }
4578 
4579 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4580 {
4581 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4582 	struct drm_device *dev = obj->base.dev;
4583 	struct drm_i915_private *dev_priv = dev->dev_private;
4584 	struct i915_vma *vma, *next;
4585 
4586 	intel_runtime_pm_get(dev_priv);
4587 
4588 	trace_i915_gem_object_destroy(obj);
4589 
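	/* Force-unbind every vma; if the unbind is interrupted by a signal,
	 * retry with dev_priv->mm.interruptible cleared so that teardown
	 * cannot be aborted.
	 */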
4590 	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4591 		int ret;
4592 
4593 		vma->pin_count = 0;
4594 		ret = i915_vma_unbind(vma);
4595 		if (WARN_ON(ret == -ERESTARTSYS)) {
4596 			bool was_interruptible;
4597 
4598 			was_interruptible = dev_priv->mm.interruptible;
4599 			dev_priv->mm.interruptible = false;
4600 
4601 			WARN_ON(i915_vma_unbind(vma));
4602 
4603 			dev_priv->mm.interruptible = was_interruptible;
4604 		}
4605 	}
4606 
4607 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4608 	 * before progressing. */
4609 	if (obj->stolen)
4610 		i915_gem_object_unpin_pages(obj);
4611 
4612 	WARN_ON(obj->frontbuffer_bits);
4613 
4614 	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4615 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4616 	    obj->tiling_mode != I915_TILING_NONE)
4617 		i915_gem_object_unpin_pages(obj);
4618 
4619 	if (WARN_ON(obj->pages_pin_count))
4620 		obj->pages_pin_count = 0;
4621 	if (discard_backing_storage(obj))
4622 		obj->madv = I915_MADV_DONTNEED;
4623 	i915_gem_object_put_pages(obj);
4624 	i915_gem_object_free_mmap_offset(obj);
4625 
4626 	BUG_ON(obj->pages);
4627 
4628 	if (obj->base.import_attach)
4629 		drm_prime_gem_destroy(&obj->base, NULL);
4630 
4631 	if (obj->ops->release)
4632 		obj->ops->release(obj);
4633 
4634 	drm_gem_object_release(&obj->base);
4635 	i915_gem_info_remove_obj(dev_priv, obj->base.size);
4636 
4637 	kfree(obj->bit_17);
4638 	i915_gem_object_free(obj);
4639 
4640 	intel_runtime_pm_put(dev_priv);
4641 }
4642 
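/* Find the normal (non-view) vma binding this object into @vm, if any. */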
4643 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4644 				     struct i915_address_space *vm)
4645 {
4646 	struct i915_vma *vma;
4647 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
4648 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
4649 		    vma->vm == vm)
4650 			return vma;
4651 	}
4652 	return NULL;
4653 }
4654 
4655 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4656 					   const struct i915_ggtt_view *view)
4657 {
4658 	struct drm_device *dev = obj->base.dev;
4659 	struct drm_i915_private *dev_priv = to_i915(dev);
4660 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
4661 	struct i915_vma *vma;
4662 
4663 	BUG_ON(!view);
4664 
4665 	list_for_each_entry(vma, &obj->vma_list, obj_link)
4666 		if (vma->vm == &ggtt->base &&
4667 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
4668 			return vma;
4669 	return NULL;
4670 }
4671 
4672 void i915_gem_vma_destroy(struct i915_vma *vma)
4673 {
4674 	WARN_ON(vma->node.allocated);
4675 
4676 	/* Keep the vma as a placeholder in the execbuffer reservation lists */
4677 	if (!list_empty(&vma->exec_list))
4678 		return;
4679 
4680 	if (!vma->is_ggtt)
4681 		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
4682 
4683 	list_del(&vma->obj_link);
4684 
4685 	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
4686 }
4687 
4688 static void
4689 i915_gem_stop_engines(struct drm_device *dev)
4690 {
4691 	struct drm_i915_private *dev_priv = dev->dev_private;
4692 	struct intel_engine_cs *engine;
4693 
4694 	for_each_engine(engine, dev_priv)
4695 		dev_priv->gt.stop_engine(engine);
4696 }
4697 
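/* Quiesce the GPU and flush pending work before entering system suspend. */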
4698 int
4699 i915_gem_suspend(struct drm_device *dev)
4700 {
4701 	struct drm_i915_private *dev_priv = dev->dev_private;
4702 	int ret = 0;
4703 
4704 	mutex_lock(&dev->struct_mutex);
4705 	ret = i915_gpu_idle(dev);
4706 	if (ret)
4707 		goto err;
4708 
4709 	i915_gem_retire_requests(dev);
4710 
4711 	i915_gem_stop_engines(dev);
4712 	mutex_unlock(&dev->struct_mutex);
4713 
4714 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4715 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4716 	flush_delayed_work(&dev_priv->mm.idle_work);
4717 
4718 	/* Assert that we successfully flushed all the work and
4719 	 * reset the GPU back to its idle, low power state.
4720 	 */
4721 	WARN_ON(dev_priv->mm.busy);
4722 
4723 	return 0;
4724 
4725 err:
4726 	mutex_unlock(&dev->struct_mutex);
4727 	return ret;
4728 }
4729 
4730 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
4731 {
4732 	struct intel_engine_cs *engine = req->engine;
4733 	struct drm_device *dev = engine->dev;
4734 	struct drm_i915_private *dev_priv = dev->dev_private;
4735 	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4736 	int i, ret;
4737 
4738 	if (!HAS_L3_DPF(dev) || !remap_info)
4739 		return 0;
4740 
4741 	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
4742 	if (ret)
4743 		return ret;
4744 
4745 	/*
4746 	 * Note: We do not worry about the concurrent register cacheline hang
4747 	 * here because no other code should access these registers other than
4748 	 * at initialization time.
4749 	 */
4750 	for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
4751 		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
4752 		intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
4753 		intel_ring_emit(engine, remap_info[i]);
4754 	}
4755 
4756 	intel_ring_advance(engine);
4757 
4758 	return ret;
4759 }
4760 
4761 void i915_gem_init_swizzling(struct drm_device *dev)
4762 {
4763 	struct drm_i915_private *dev_priv = dev->dev_private;
4764 
4765 	if (INTEL_INFO(dev)->gen < 5 ||
4766 	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4767 		return;
4768 
4769 	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4770 				 DISP_TILE_SURFACE_SWIZZLING);
4771 
4772 	if (IS_GEN5(dev))
4773 		return;
4774 
4775 	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4776 	if (IS_GEN6(dev))
4777 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4778 	else if (IS_GEN7(dev))
4779 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4780 	else if (IS_GEN8(dev))
4781 		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4782 	else
4783 		BUG();
4784 }
4785 
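/* Disable an unused legacy ring by zeroing its control registers. */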
4786 static void init_unused_ring(struct drm_device *dev, u32 base)
4787 {
4788 	struct drm_i915_private *dev_priv = dev->dev_private;
4789 
4790 	I915_WRITE(RING_CTL(base), 0);
4791 	I915_WRITE(RING_HEAD(base), 0);
4792 	I915_WRITE(RING_TAIL(base), 0);
4793 	I915_WRITE(RING_START(base), 0);
4794 }
4795 
4796 static void init_unused_rings(struct drm_device *dev)
4797 {
4798 	if (IS_I830(dev)) {
4799 		init_unused_ring(dev, PRB1_BASE);
4800 		init_unused_ring(dev, SRB0_BASE);
4801 		init_unused_ring(dev, SRB1_BASE);
4802 		init_unused_ring(dev, SRB2_BASE);
4803 		init_unused_ring(dev, SRB3_BASE);
4804 	} else if (IS_GEN2(dev)) {
4805 		init_unused_ring(dev, SRB0_BASE);
4806 		init_unused_ring(dev, SRB1_BASE);
4807 	} else if (IS_GEN3(dev)) {
4808 		init_unused_ring(dev, PRB1_BASE);
4809 		init_unused_ring(dev, PRB2_BASE);
4810 	}
4811 }
4812 
4813 int i915_gem_init_engines(struct drm_device *dev)
4814 {
4815 	struct drm_i915_private *dev_priv = dev->dev_private;
4816 	int ret;
4817 
4818 	ret = intel_init_render_ring_buffer(dev);
4819 	if (ret)
4820 		return ret;
4821 
4822 	if (HAS_BSD(dev)) {
4823 		ret = intel_init_bsd_ring_buffer(dev);
4824 		if (ret)
4825 			goto cleanup_render_ring;
4826 	}
4827 
4828 	if (HAS_BLT(dev)) {
4829 		ret = intel_init_blt_ring_buffer(dev);
4830 		if (ret)
4831 			goto cleanup_bsd_ring;
4832 	}
4833 
4834 	if (HAS_VEBOX(dev)) {
4835 		ret = intel_init_vebox_ring_buffer(dev);
4836 		if (ret)
4837 			goto cleanup_blt_ring;
4838 	}
4839 
4840 	if (HAS_BSD2(dev)) {
4841 		ret = intel_init_bsd2_ring_buffer(dev);
4842 		if (ret)
4843 			goto cleanup_vebox_ring;
4844 	}
4845 
4846 	return 0;
4847 
4848 cleanup_vebox_ring:
4849 	intel_cleanup_engine(&dev_priv->engine[VECS]);
4850 cleanup_blt_ring:
4851 	intel_cleanup_engine(&dev_priv->engine[BCS]);
4852 cleanup_bsd_ring:
4853 	intel_cleanup_engine(&dev_priv->engine[VCS]);
4854 cleanup_render_ring:
4855 	intel_cleanup_engine(&dev_priv->engine[RCS]);
4856 
4857 	return ret;
4858 }
4859 
4860 int
4861 i915_gem_init_hw(struct drm_device *dev)
4862 {
4863 	struct drm_i915_private *dev_priv = dev->dev_private;
4864 	struct intel_engine_cs *engine;
4865 	int ret, j;
4866 
4867 	/* Double layer security blanket, see i915_gem_init() */
4868 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4869 
4870 	if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
4871 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4872 
4873 	if (IS_HASWELL(dev))
4874 		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4875 			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4876 
4877 	if (HAS_PCH_NOP(dev)) {
4878 		if (IS_IVYBRIDGE(dev)) {
4879 			u32 temp = I915_READ(GEN7_MSG_CTL);
4880 			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4881 			I915_WRITE(GEN7_MSG_CTL, temp);
4882 		} else if (INTEL_INFO(dev)->gen >= 7) {
4883 			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4884 			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4885 			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4886 		}
4887 	}
4888 
4889 	i915_gem_init_swizzling(dev);
4890 
4891 	/*
4892 	 * At least 830 can leave some of the unused rings
4893 	 * "active" (i.e. head != tail) after resume, which
4894 	 * will prevent c3 entry. Make sure all unused rings
4895 	 * are totally idle.
4896 	 */
4897 	init_unused_rings(dev);
4898 
4899 	BUG_ON(!dev_priv->kernel_context);
4900 
4901 	ret = i915_ppgtt_init_hw(dev);
4902 	if (ret) {
4903 		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4904 		goto out;
4905 	}
4906 
4907 	/* Need to do basic initialisation of all rings first: */
4908 	for_each_engine(engine, dev_priv) {
4909 		ret = engine->init_hw(engine);
4910 		if (ret)
4911 			goto out;
4912 	}
4913 
4914 	intel_mocs_init_l3cc_table(dev);
4915 
4916 	/* We can't enable contexts until all firmware is loaded */
4917 	if (HAS_GUC_UCODE(dev)) {
4918 		ret = intel_guc_ucode_load(dev);
4919 		if (ret) {
4920 			DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
4921 			ret = -EIO;
4922 			goto out;
4923 		}
4924 	}
4925 
4926 	/*
4927 	 * Increment the next seqno by 0x100 so we have a visible break
4928 	 * on re-initialisation
4929 	 */
4930 	ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
4931 	if (ret)
4932 		goto out;
4933 
4934 	/* Now it is safe to go back round and do everything else: */
4935 	for_each_engine(engine, dev_priv) {
4936 		struct drm_i915_gem_request *req;
4937 
4938 		req = i915_gem_request_alloc(engine, NULL);
4939 		if (IS_ERR(req)) {
4940 			ret = PTR_ERR(req);
4941 			break;
4942 		}
4943 
4944 		if (engine->id == RCS) {
4945 			for (j = 0; j < NUM_L3_SLICES(dev); j++) {
4946 				ret = i915_gem_l3_remap(req, j);
4947 				if (ret)
4948 					goto err_request;
4949 			}
4950 		}
4951 
4952 		ret = i915_ppgtt_init_ring(req);
4953 		if (ret)
4954 			goto err_request;
4955 
4956 		ret = i915_gem_context_enable(req);
4957 		if (ret)
4958 			goto err_request;
4959 
4960 err_request:
4961 		i915_add_request_no_flush(req);
4962 		if (ret) {
4963 			DRM_ERROR("Failed to enable %s, error=%d\n",
4964 				  engine->name, ret);
4965 			i915_gem_cleanup_engines(dev);
4966 			break;
4967 		}
4968 	}
4969 
4970 out:
4971 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4972 	return ret;
4973 }
4974 
4975 int i915_gem_init(struct drm_device *dev)
4976 {
4977 	struct drm_i915_private *dev_priv = dev->dev_private;
4978 	int ret;
4979 
4980 	i915.enable_execlists = intel_sanitize_enable_execlists(dev,
4981 			i915.enable_execlists);
4982 
4983 	mutex_lock(&dev->struct_mutex);
4984 
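	/* Select the submission backend: legacy ringbuffer or execlists. */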
4985 	if (!i915.enable_execlists) {
4986 		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
4987 		dev_priv->gt.init_engines = i915_gem_init_engines;
4988 		dev_priv->gt.cleanup_engine = intel_cleanup_engine;
4989 		dev_priv->gt.stop_engine = intel_stop_engine;
4990 	} else {
4991 		dev_priv->gt.execbuf_submit = intel_execlists_submission;
4992 		dev_priv->gt.init_engines = intel_logical_rings_init;
4993 		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4994 		dev_priv->gt.stop_engine = intel_logical_ring_stop;
4995 	}
4996 
4997 	/* This is just a security blanket to placate dragons.
4998 	 * On some systems, we very sporadically observe that the first TLBs
4999 	 * used by the CS may be stale, despite us poking the TLB reset. If
5000 	 * we hold the forcewake during initialisation these problems
5001 	 * just magically go away.
5002 	 */
5003 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5004 
5005 	ret = i915_gem_init_userptr(dev);
5006 	if (ret)
5007 		goto out_unlock;
5008 
5009 	i915_gem_init_ggtt(dev);
5010 
5011 	ret = i915_gem_context_init(dev);
5012 	if (ret)
5013 		goto out_unlock;
5014 
5015 	ret = dev_priv->gt.init_engines(dev);
5016 	if (ret)
5017 		goto out_unlock;
5018 
5019 	ret = i915_gem_init_hw(dev);
5020 	if (ret == -EIO) {
5021 		/* Allow ring initialisation to fail by marking the GPU as
5022 		 * wedged. But we only want to do this where the GPU is angry,
5023 		 * for all other failure, such as an allocation failure, bail.
5024 		 */
5025 		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
5026 		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
5027 		ret = 0;
5028 	}
5029 
5030 out_unlock:
5031 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5032 	mutex_unlock(&dev->struct_mutex);
5033 
5034 	return ret;
5035 }
5036 
5037 void
5038 i915_gem_cleanup_engines(struct drm_device *dev)
5039 {
5040 	struct drm_i915_private *dev_priv = dev->dev_private;
5041 	struct intel_engine_cs *engine;
5042 
5043 	for_each_engine(engine, dev_priv)
5044 		dev_priv->gt.cleanup_engine(engine);
5045 
5046 	if (i915.enable_execlists)
5047 		/*
5048 		 * Neither the BIOS, ourselves nor any other kernel
5049 		 * expects the system to be in execlists mode on startup,
5050 		 * so we need to reset the GPU back to legacy mode.
5051 		 */
5052 		intel_gpu_reset(dev, ALL_ENGINES);
5053 }
5054 
5055 static void
5056 init_engine_lists(struct intel_engine_cs *engine)
5057 {
5058 	INIT_LIST_HEAD(&engine->active_list);
5059 	INIT_LIST_HEAD(&engine->request_list);
5060 }
5061 
5062 void
5063 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5064 {
5065 	struct drm_device *dev = dev_priv->dev;
5066 
5067 	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
5068 	    !IS_CHERRYVIEW(dev_priv))
5069 		dev_priv->num_fence_regs = 32;
5070 	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
5071 		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
5072 		dev_priv->num_fence_regs = 16;
5073 	else
5074 		dev_priv->num_fence_regs = 8;
5075 
5076 	if (intel_vgpu_active(dev))
5077 		dev_priv->num_fence_regs =
5078 				I915_READ(vgtif_reg(avail_rs.fence_num));
5079 
5080 	/* Initialize fence registers to zero */
5081 	i915_gem_restore_fences(dev);
5082 
5083 	i915_gem_detect_bit_6_swizzle(dev);
5084 }
5085 
5086 void
5087 i915_gem_load_init(struct drm_device *dev)
5088 {
5089 	struct drm_i915_private *dev_priv = dev->dev_private;
5090 	int i;
5091 
5092 	dev_priv->objects =
5093 		kmem_cache_create("i915_gem_object",
5094 				  sizeof(struct drm_i915_gem_object), 0,
5095 				  SLAB_HWCACHE_ALIGN,
5096 				  NULL);
5097 	dev_priv->vmas =
5098 		kmem_cache_create("i915_gem_vma",
5099 				  sizeof(struct i915_vma), 0,
5100 				  SLAB_HWCACHE_ALIGN,
5101 				  NULL);
5102 	dev_priv->requests =
5103 		kmem_cache_create("i915_gem_request",
5104 				  sizeof(struct drm_i915_gem_request), 0,
5105 				  SLAB_HWCACHE_ALIGN,
5106 				  NULL);
5107 
5108 	INIT_LIST_HEAD(&dev_priv->vm_list);
5109 	INIT_LIST_HEAD(&dev_priv->context_list);
5110 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
5111 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
5112 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
5113 	for (i = 0; i < I915_NUM_ENGINES; i++)
5114 		init_engine_lists(&dev_priv->engine[i]);
5115 	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
5116 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
5117 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
5118 			  i915_gem_retire_work_handler);
5119 	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
5120 			  i915_gem_idle_work_handler);
5121 	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5122 
5123 	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
5124 
5125 	/*
5126 	 * Set initial sequence number for requests.
5127 	 * Using this number allows the wraparound to happen early,
5128 	 * catching any obvious problems.
5129 	 */
5130 	dev_priv->next_seqno = ((u32)~0 - 0x1100);
5131 	dev_priv->last_seqno = ((u32)~0 - 0x1101);
5132 
5133 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
5134 
5135 	init_waitqueue_head(&dev_priv->pending_flip_queue);
5136 
5137 	dev_priv->mm.interruptible = true;
5138 
5139 	mutex_init(&dev_priv->fb_tracking.lock);
5140 }
5141 
5142 void i915_gem_load_cleanup(struct drm_device *dev)
5143 {
5144 	struct drm_i915_private *dev_priv = to_i915(dev);
5145 
5146 	kmem_cache_destroy(dev_priv->requests);
5147 	kmem_cache_destroy(dev_priv->vmas);
5148 	kmem_cache_destroy(dev_priv->objects);
5149 }
5150 
5151 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5152 {
5153 	struct drm_i915_file_private *file_priv = file->driver_priv;
5154 
5155 	/* Clean up our request list when the client is going away, so that
5156 	 * later retire_requests won't dereference our soon-to-be-gone
5157 	 * file_priv.
5158 	 */
5159 	spin_lock(&file_priv->mm.lock);
5160 	while (!list_empty(&file_priv->mm.request_list)) {
5161 		struct drm_i915_gem_request *request;
5162 
5163 		request = list_first_entry(&file_priv->mm.request_list,
5164 					   struct drm_i915_gem_request,
5165 					   client_list);
5166 		list_del(&request->client_list);
5167 		request->file_priv = NULL;
5168 	}
5169 	spin_unlock(&file_priv->mm.lock);
5170 
5171 	if (!list_empty(&file_priv->rps.link)) {
5172 		spin_lock(&to_i915(dev)->rps.client_lock);
5173 		list_del(&file_priv->rps.link);
5174 		spin_unlock(&to_i915(dev)->rps.client_lock);
5175 	}
5176 }
5177 
5178 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
5179 {
5180 	struct drm_i915_file_private *file_priv;
5181 	int ret;
5182 
5183 	DRM_DEBUG_DRIVER("\n");
5184 
5185 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5186 	if (!file_priv)
5187 		return -ENOMEM;
5188 
5189 	file->driver_priv = file_priv;
5190 	file_priv->dev_priv = dev->dev_private;
5191 	file_priv->file = file;
5192 	INIT_LIST_HEAD(&file_priv->rps.link);
5193 
5194 	spin_lock_init(&file_priv->mm.lock);
5195 	INIT_LIST_HEAD(&file_priv->mm.request_list);
5196 
5197 	file_priv->bsd_ring = -1;
5198 
5199 	ret = i915_gem_context_open(dev, file);
5200 	if (ret)
5201 		kfree(file_priv);
5202 
5203 	return ret;
5204 }
5205 
5206 /**
5207  * i915_gem_track_fb - update frontbuffer tracking
5208  * @old: current GEM buffer for the frontbuffer slots
5209  * @new: new GEM buffer for the frontbuffer slots
5210  * @frontbuffer_bits: bitmask of frontbuffer slots
5211  *
5212  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5213  * from @old and setting them in @new. Both @old and @new can be NULL.
5214  */
5215 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5216 		       struct drm_i915_gem_object *new,
5217 		       unsigned frontbuffer_bits)
5218 {
5219 	if (old) {
5220 		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
5221 		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
5222 		old->frontbuffer_bits &= ~frontbuffer_bits;
5223 	}
5224 
5225 	if (new) {
5226 		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
5227 		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
5228 		new->frontbuffer_bits |= frontbuffer_bits;
5229 	}
5230 }
5231 
5232 /* All the new VM stuff */
5233 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
5234 			struct i915_address_space *vm)
5235 {
5236 	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5237 	struct i915_vma *vma;
5238 
5239 	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5240 
5241 	list_for_each_entry(vma, &o->vma_list, obj_link) {
5242 		if (vma->is_ggtt &&
5243 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5244 			continue;
5245 		if (vma->vm == vm)
5246 			return vma->node.start;
5247 	}
5248 
5249 	WARN(1, "%s vma for this object not found.\n",
5250 	     i915_is_ggtt(vm) ? "global" : "ppgtt");
5251 	return -1;
5252 }
5253 
5254 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
5255 				  const struct i915_ggtt_view *view)
5256 {
5257 	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
5258 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
5259 	struct i915_vma *vma;
5260 
5261 	list_for_each_entry(vma, &o->vma_list, obj_link)
5262 		if (vma->vm == &ggtt->base &&
5263 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
5264 			return vma->node.start;
5265 
5266 	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
5267 	return -1;
5268 }
5269 
5270 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5271 			struct i915_address_space *vm)
5272 {
5273 	struct i915_vma *vma;
5274 
5275 	list_for_each_entry(vma, &o->vma_list, obj_link) {
5276 		if (vma->is_ggtt &&
5277 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5278 			continue;
5279 		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5280 			return true;
5281 	}
5282 
5283 	return false;
5284 }
5285 
5286 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
5287 				  const struct i915_ggtt_view *view)
5288 {
5289 	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
5290 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
5291 	struct i915_vma *vma;
5292 
5293 	list_for_each_entry(vma, &o->vma_list, obj_link)
5294 		if (vma->vm == &ggtt->base &&
5295 		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
5296 		    drm_mm_node_allocated(&vma->node))
5297 			return true;
5298 
5299 	return false;
5300 }
5301 
5302 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5303 {
5304 	struct i915_vma *vma;
5305 
5306 	list_for_each_entry(vma, &o->vma_list, obj_link)
5307 		if (drm_mm_node_allocated(&vma->node))
5308 			return true;
5309 
5310 	return false;
5311 }
5312 
5313 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5314 				struct i915_address_space *vm)
5315 {
5316 	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5317 	struct i915_vma *vma;
5318 
5319 	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5320 
5321 	BUG_ON(list_empty(&o->vma_list));
5322 
5323 	list_for_each_entry(vma, &o->vma_list, obj_link) {
5324 		if (vma->is_ggtt &&
5325 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5326 			continue;
5327 		if (vma->vm == vm)
5328 			return vma->node.size;
5329 	}
5330 	return 0;
5331 }
5332 
5333 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
5334 {
5335 	struct i915_vma *vma;
5336 	list_for_each_entry(vma, &obj->vma_list, obj_link)
5337 		if (vma->pin_count > 0)
5338 			return true;
5339 
5340 	return false;
5341 }
5342 
5343 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
5344 struct page *
5345 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
5346 {
5347 	struct page *page;
5348 
5349 	/* Only default objects have per-page dirty tracking */
5350 	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
5351 		return NULL;
5352 
5353 	page = i915_gem_object_get_page(obj, n);
5354 	set_page_dirty(page);
5355 	return page;
5356 }
5357 
5358 /* Allocate a new GEM object and fill it with the supplied data */
5359 struct drm_i915_gem_object *
5360 i915_gem_object_create_from_data(struct drm_device *dev,
5361 			         const void *data, size_t size)
5362 {
5363 	struct drm_i915_gem_object *obj;
5364 	struct sg_table *sg;
5365 	size_t bytes;
5366 	int ret;
5367 
5368 	obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
5369 	if (IS_ERR_OR_NULL(obj))
5370 		return obj;
5371 
5372 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
5373 	if (ret)
5374 		goto fail;
5375 
5376 	ret = i915_gem_object_get_pages(obj);
5377 	if (ret)
5378 		goto fail;
5379 
5380 	i915_gem_object_pin_pages(obj);
5381 	sg = obj->pages;
5382 	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
5383 	obj->dirty = 1;		/* Backing store is now out of date */
5384 	i915_gem_object_unpin_pages(obj);
5385 
5386 	if (WARN_ON(bytes != size)) {
5387 		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
5388 		ret = -EFAULT;
5389 		goto fail;
5390 	}
5391 
5392 	return obj;
5393 
5394 fail:
5395 	drm_gem_object_unreference(&obj->base);
5396 	return ERR_PTR(ret);
5397 }
5398