xref: /openbmc/linux/drivers/gpu/drm/i915/i915_gem.c (revision 593692d2)
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_userptr.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

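/*
 * Reserve a node within the CPU-mappable range of the GGTT (below
 * ggtt->mappable_end), taking ggtt->vm.mutex. Callers use it as a scratch
 * slot through which individual pages are mapped on demand.
 */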
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->vm.mutex);
	if (err)
		return err;

	memset(node, 0, sizeof(*node));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					  size, 0, I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);

	mutex_unlock(&ggtt->vm.mutex);

	return err;
}

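/* Release a node previously reserved with insert_mappable_node(). */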
static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
	mutex_lock(&ggtt->vm.mutex);
	drm_mm_remove_node(node);
	mutex_unlock(&ggtt->vm.mutex);
}

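/**
 * i915_gem_get_aperture_ioctl - report the total and available GGTT size
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * The available size is an estimate: space that is reserved or occupied by
 * pinned vmas is subtracted from the total.
 */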
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	if (mutex_lock_interruptible(&ggtt->vm.mutex))
		return -EINTR;

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

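/*
 * Unbind all vmas belonging to the object, honouring the
 * I915_GEM_OBJECT_UNBIND_* flags. Returns 0 on success or a negative error
 * code (e.g. -EBUSY if a pinned vma blocks the unbind).
 */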
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags)
{
	struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
	LIST_HEAD(still_in_list);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	int ret;

	assert_object_held(obj);

	if (list_empty(&obj->vma.list))
		return 0;

	/*
	 * As some machines use ACPI to handle runtime-resume callbacks, and
	 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
	 * as they are required by the shrinker. Ergo, we wake the device up
	 * first just in case.
	 */
	wakeref = intel_runtime_pm_get(rpm);

try_again:
	ret = 0;
	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		struct i915_address_space *vm = vma->vm;

		list_move_tail(&vma->obj_link, &still_in_list);
		if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
			continue;

		if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
			ret = -EBUSY;
			break;
		}

		ret = -EAGAIN;
		if (!i915_vm_tryopen(vm))
			break;

		/* Prevent vma being freed by i915_vma_parked as we unbind */
		vma = __i915_vma_get(vma);
		spin_unlock(&obj->vma.lock);

		if (vma) {
			bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
			ret = -EBUSY;
			if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
				assert_object_held(vma->obj);
				ret = i915_vma_unbind_async(vma, vm_trylock);
			}

			if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
					      !i915_vma_is_active(vma))) {
				if (vm_trylock) {
					if (mutex_trylock(&vma->vm->mutex)) {
						ret = __i915_vma_unbind(vma);
						mutex_unlock(&vma->vm->mutex);
					} else {
						ret = -EBUSY;
					}
				} else {
					ret = i915_vma_unbind(vma);
				}
			}

			__i915_vma_put(vma);
		}

		i915_vm_close(vm);
		spin_lock(&obj->vma.lock);
	}
	list_splice_init(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
		rcu_barrier(); /* flush the i915_vm_release() */
		goto try_again;
	}

	intel_runtime_pm_put(rpm, wakeref);

	return ret;
}

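/* Per-page copy function for the shmem pread path. Flushes the source range
 * first if needs_clflush is set so that stale cachelines are not copied to
 * user space.
 */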
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

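/* Read the object through its shmem backing store, copying to user space one
 * page at a time.
 */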
static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		goto err_unpin;

	i915_gem_object_finish_access(obj);
	i915_gem_object_unlock(obj);

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unpin_pages(obj);
	return ret;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}

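/* Copy data from the GGTT aperture to user space. The atomic (non-faulting)
 * mapping is tried first; if the user buffer faults we fall back to a mapping
 * that may sleep.
 */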
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

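/* Prepare GGTT aperture access for a pread/pwrite: move the object to the GTT
 * domain and pin it into the mappable aperture, or fall back to a single
 * page-sized GGTT node that is remapped page by page with insert_page().
 * Returns the pinned vma, NULL when the fallback node is used, or an ERR_PTR.
 */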
static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
					     struct drm_mm_node *node,
					     bool write)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	int ret;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	vma = ERR_PTR(-ENODEV);
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_ww;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_ww;

	if (!i915_gem_object_is_tiled(obj))
		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
						  PIN_MAPPABLE |
						  PIN_NONBLOCK /* NOWARN */ |
						  PIN_NOEVICT);
	if (vma == ERR_PTR(-EDEADLK)) {
		ret = -EDEADLK;
		goto err_ww;
	} else if (!IS_ERR(vma)) {
		node->start = i915_ggtt_offset(vma);
		node->flags = 0;
	} else {
		ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
		if (ret)
			goto err_ww;
		GEM_BUG_ON(!drm_mm_node_allocated(node));
		vma = NULL;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret) {
		if (drm_mm_node_allocated(node)) {
			ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
			remove_mappable_node(ggtt, node);
		} else {
			i915_vma_unpin(vma);
		}
	}

err_ww:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return ret ? ERR_PTR(ret) : vma;
}

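/* Undo i915_gem_gtt_prepare(): unpin the backing pages and release either the
 * fallback node or the pinned vma.
 */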
static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
				 struct drm_mm_node *node,
				 struct i915_vma *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	i915_gem_object_unpin_pages(obj);
	if (drm_mm_node_allocated(node)) {
		ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
		remove_mappable_node(ggtt, node);
	} else {
		i915_vma_unpin(vma);
	}
}

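/* Fallback pread path that reads through the GGTT aperture one page at a
 * time, used when the shmem path is unavailable or faults.
 */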
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	vma = i915_gem_gtt_prepare(obj, &node, false);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return ret;
}

/**
 * i915_gem_pread_ioctl - Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	/* PREAD is disallowed for all platforms after TGL-LP.  This also
	 * covers all platforms with local memory.
	 */
	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
		return -EOPNOTSUPP;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source.  */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);
	ret = -ENODEV;
	if (obj->ops->pread)
		ret = obj->ops->pread(obj, args);
	if (ret != -ENODEV)
		goto out;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the
 * data directly from the user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret = 0;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref)
			return -EFAULT;
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = i915_gem_gtt_prepare(obj, &node, true);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			/* flush the write before we modify the GGTT */
			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry with the
		 * path that handles page faults.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		goto err_unpin;

	i915_gem_object_finish_access(obj);
	i915_gem_object_unlock(obj);

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_object_unpin_pages(obj);
	return ret;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}

/**
 * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	/* PWRITE is disallowed for all platforms after TGL-LP.  This also
	 * covers all platforms with local memory.
	 */
	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
		return -EOPNOTSUPP;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    i915_gem_cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fall back to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (i915_gem_object_has_struct_page(obj))
			ret = i915_gem_shmem_pwrite(obj, args);
	}

err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this cannot
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &to_gt(i915)->ggtt->userfault_list, userfault_link)
		__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * The fences will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
		struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that not only
		 * violates our tracking of activity and associated
		 * locking/barriers, but is also illegal given that the hw
		 * is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

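/* Remove the vma from the object's vma tree so that a fresh vma can be
 * created by a subsequent i915_vma_instance() call.
 */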
static void discard_ggtt_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	spin_lock(&obj->vma.lock);
	if (!RB_EMPTY_NODE(&vma->obj_node)) {
		rb_erase(&vma->obj_node, &obj->vma.tree);
		RB_CLEAR_NODE(&vma->obj_node);
	}
	spin_unlock(&obj->vma.lock);
}

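/* Pin the object into the global GTT, rebinding it if it is misplaced for the
 * requested size/alignment/flags. The caller provides the ww acquire context
 * used for locking.
 */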
struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    const struct i915_ggtt_view *view,
			    u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct i915_vma *vma;
	int ret;

	GEM_WARN_ON(!ww);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/*
		 * If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > ggtt->mappable_end)
			return ERR_PTR(-E2BIG);

		/*
		 * If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to do a search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > ggtt->mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

new_vma:
	vma = i915_vma_instance(obj, &ggtt->vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > ggtt->mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
			discard_ggtt_vma(vma);
			goto new_vma;
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);

	if (ret)
		return ERR_PTR(ret);

	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
		mutex_lock(&ggtt->vm.mutex);
		i915_vma_revoke_fence(vma);
		mutex_unlock(&ggtt->vm.mutex);
	}

	ret = i915_vma_wait_for_bind(vma);
	if (ret) {
		i915_vma_unpin(vma);
		return ERR_PTR(ret);
	}

	return vma;
}

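/* Convenience wrapper around i915_gem_object_ggtt_pin_ww() that supplies its
 * own ww context and retries on -EDEADLK via for_i915_gem_ww().
 */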
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size, u64 alignment, u64 flags)
{
	struct i915_gem_ww_ctx ww;
	struct i915_vma *ret;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
						  alignment, flags);
		if (IS_ERR(ret))
			err = PTR_ERR(ret);
	}

	return err ? ERR_PTR(err) : ret;
}

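/**
 * i915_gem_madvise_ioctl - report whether a buffer's backing storage is needed
 * @dev: drm device
 * @data: ioctl data blob
 * @file_priv: drm file
 *
 * I915_MADV_DONTNEED marks the backing storage as discardable under memory
 * pressure; I915_MADV_WILLNEED marks it as needed again.
 */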
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_clear_tiling_quirk(obj);
			i915_gem_object_make_shrinkable(obj);
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_make_unshrinkable(obj);
			i915_gem_object_set_tiling_quirk(obj);
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED) {
		obj->mm.madv = args->madv;
		if (obj->ops->adjust_lru)
			obj->ops->adjust_lru(obj);
	}

	if (i915_gem_object_has_pages(obj) ||
	    i915_gem_object_has_self_managed_shrink_list(obj)) {
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		if (!list_empty(&obj->mm.link)) {
			struct list_head *list;

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else
				list = &i915->mm.shrink_list;
			list_move_tail(&obj->mm.link, list);
		}
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;

	i915_gem_object_unlock(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

int i915_gem_init(struct drm_i915_private *dev_priv)
{
	int ret;

	/* We need to fall back to 4K pages if the host doesn't support huge gtt. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		mkwrite_device_info(dev_priv)->page_sizes =
			I915_GTT_PAGE_SIZE_4K;

	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

	intel_uc_fetch_firmwares(&to_gt(dev_priv)->uc);
	intel_wopcm_init(&dev_priv->wopcm);

	ret = i915_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	/*
	 * Despite its name, intel_init_clock_gating applies both display
	 * clock gating workarounds and GT mmio workarounds, as well as the
	 * occasional GT power context workaround. Worse, sometimes it includes
	 * a context register workaround which we need to apply before we
	 * record the default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_init_clock_gating(dev_priv);

	ret = intel_gt_init(to_gt(dev_priv));
	if (ret)
		goto err_unlock;

	return 0;

	/*
	 * Unwinding is complicated by the fact that we want to handle -EIO to
	 * mean disable GPU submission but keep KMS alive. We want to mark the
	 * HW as irreversibly wedged, but keep enough state around that the
	 * driver doesn't explode during runtime.
	 */
err_unlock:
	i915_gem_drain_workqueue(dev_priv);

	if (ret != -EIO)
		intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);

	if (ret == -EIO) {
		/*
		 * Allow engines or uC initialisation to fail by marking the GPU
		 * as wedged. But we only want to do this when the GPU is angry;
		 * for all other failures, such as an allocation failure, we bail.
		 */
		if (!intel_gt_is_wedged(to_gt(dev_priv))) {
			i915_probe_error(dev_priv,
					 "Failed to initialize GPU, declaring it wedged!\n");
			intel_gt_set_wedged(to_gt(dev_priv));
		}

		/* Minimal basic recovery for KMS */
		ret = i915_ggtt_enable_hw(dev_priv);
		i915_ggtt_resume(to_gt(dev_priv)->ggtt);
		intel_init_clock_gating(dev_priv);
	}

	i915_gem_drain_freed_objects(dev_priv);

	return ret;
}

void i915_gem_driver_register(struct drm_i915_private *i915)
{
	i915_gem_driver_register__shrinker(i915);

	intel_engines_driver_register(i915);
}

void i915_gem_driver_unregister(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
}

void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
	intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref);

	i915_gem_suspend_late(dev_priv);
	intel_gt_driver_remove(to_gt(dev_priv));
	dev_priv->uabi_engines = RB_ROOT;

	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);

	i915_gem_drain_freed_objects(dev_priv);
}

void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
	intel_gt_driver_release(to_gt(dev_priv));

	intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);

	i915_gem_drain_freed_objects(dev_priv);

	drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mm.obj_lock);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.purge_list);
	INIT_LIST_HEAD(&i915->mm.shrink_list);

	i915_gem_init__objects(i915);
}

void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	i915_gem_init__mm(dev_priv);
	i915_gem_init__contexts(dev_priv);

	spin_lock_init(&dev_priv->fb_tracking.lock);
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_freed_objects(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = i915;
	file_priv->file = file;

	file_priv->bsd_engine = -1;
	file_priv->hang_timestamp = jiffies;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif