1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2014-2016 Intel Corporation
5  */
6 
7 #include <linux/anon_inodes.h>
8 #include <linux/mman.h>
9 #include <linux/pfn_t.h>
10 #include <linux/sizes.h>
11 
12 #include "gt/intel_gt.h"
13 #include "gt/intel_gt_requests.h"
14 
15 #include "i915_drv.h"
16 #include "i915_gem_gtt.h"
17 #include "i915_gem_ioctls.h"
18 #include "i915_gem_object.h"
19 #include "i915_gem_mman.h"
20 #include "i915_trace.h"
21 #include "i915_user_extensions.h"
22 #include "i915_gem_ttm.h"
23 #include "i915_vma.h"
24 
25 static inline bool
26 __vma_matches(struct vm_area_struct *vma, struct file *filp,
27 	      unsigned long addr, unsigned long size)
28 {
29 	if (vma->vm_file != filp)
30 		return false;
31 
32 	return vma->vm_start == addr &&
33 	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
34 }
35 
36 /**
37  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
38  *			 it is mapped to.
39  * @dev: drm device
40  * @data: ioctl data blob
41  * @file: drm file
42  *
43  * While the mapping holds a reference on the contents of the object, it doesn't
44  * imply a ref on the object itself.
45  *
46  * IMPORTANT:
47  *
 * DRM driver writers who look at this function as an example of how to do GEM
 * mmap support, please don't implement mmap support like this. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on;
 * hiding the mmap call in a driver-private ioctl breaks that. The i915 driver
 * only does cpu mmaps this way because we didn't know better.
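 *
 * Purely as an illustration of the legacy uapi (not kernel code), a userspace
 * caller drives this path roughly as below, assuming libdrm's drmIoctl() and
 * the definitions from the i915 uapi header; fd, handle, length and ptr are
 * the caller's own variables, and new userspace should prefer the
 * DRM_IOCTL_I915_GEM_MMAP_OFFSET path documented later in this file:
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,	// GEM handle, e.g. from GEM_CREATE
 *		.offset = 0,		// byte offset into the object
 *		.size = length,		// number of bytes to map
 *		.flags = I915_MMAP_WC,	// optional: write-combined CPU PTEs
 *	};
 *	void *ptr = NULL;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;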
55  */
56 int
57 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
58 		    struct drm_file *file)
59 {
60 	struct drm_i915_private *i915 = to_i915(dev);
61 	struct drm_i915_gem_mmap *args = data;
62 	struct drm_i915_gem_object *obj;
63 	unsigned long addr;
64 
65 	/* mmap ioctl is disallowed for all platforms after TGL-LP.  This also
66 	 * covers all platforms with local memory.
67 	 */
68 	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
69 		return -EOPNOTSUPP;
70 
71 	if (args->flags & ~(I915_MMAP_WC))
72 		return -EINVAL;
73 
74 	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
75 		return -ENODEV;
76 
77 	obj = i915_gem_object_lookup(file, args->handle);
78 	if (!obj)
79 		return -ENOENT;
80 
81 	/* prime objects have no backing filp to GEM mmap
82 	 * pages from.
83 	 */
84 	if (!obj->base.filp) {
85 		addr = -ENXIO;
86 		goto err;
87 	}
88 
89 	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
90 		addr = -EINVAL;
91 		goto err;
92 	}
93 
94 	addr = vm_mmap(obj->base.filp, 0, args->size,
95 		       PROT_READ | PROT_WRITE, MAP_SHARED,
96 		       args->offset);
97 	if (IS_ERR_VALUE(addr))
98 		goto err;
99 
100 	if (args->flags & I915_MMAP_WC) {
101 		struct mm_struct *mm = current->mm;
102 		struct vm_area_struct *vma;
103 
104 		if (mmap_write_lock_killable(mm)) {
105 			addr = -EINTR;
106 			goto err;
107 		}
108 		vma = find_vma(mm, addr);
109 		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
110 			vma->vm_page_prot =
111 				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
112 		else
113 			addr = -ENOMEM;
114 		mmap_write_unlock(mm);
115 		if (IS_ERR_VALUE(addr))
116 			goto err;
117 	}
118 	i915_gem_object_put(obj);
119 
120 	args->addr_ptr = (u64)addr;
121 	return 0;
122 
123 err:
124 	i915_gem_object_put(obj);
125 	return addr;
126 }
127 
128 static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
129 {
130 	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
131 }
132 
133 /**
134  * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
135  *
136  * A history of the GTT mmap interface:
137  *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem,
 *     which we called the page-fault-of-doom, was ping-ponging between two
 *     objects that could not both fit inside the GTT: the memcpy would page
 *     one object in at the expense of the other between every single byte.
145  *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
147  *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
148  *     object is too large for the available space (or simply too large
149  *     for the mappable aperture!), a view is created instead and faulted
150  *     into userspace. (This view is aligned and sized appropriately for
151  *     fenced access.)
152  *
153  * 2 - Recognise WC as a separate cache domain so that we can flush the
154  *     delayed writes via GTT before performing direct access via WC.
155  *
156  * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
157  *     pagefault; swapin remains transparent.
158  *
159  * 4 - Support multiple fault handlers per object depending on object's
160  *     backing storage (a.k.a. MMAP_OFFSET).
161  *
162  * Restrictions:
163  *
 *  * snoopable objects cannot be accessed via the GTT. Doing so can cause
 *    machine hangs on some architectures, and memory corruption on others.
 *    An attempt to service a GTT page fault from a snoopable object will
 *    generate a SIGBUS.
167  *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 * Caveats:
173  *
174  *  * a new GTT page fault will synchronize rendering from the GPU and flush
175  *    all data to system memory. Subsequent access will not be synchronized.
176  *
177  *  * all mappings are revoked on runtime device suspend.
178  *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require a fence register for display and blitter access
 *    as well). Contention for the fence registers will cause the previous
 *    users to be unmapped, and any new access will generate new page faults.
183  *
184  *  * running out of memory while servicing a fault may generate a SIGBUS,
185  *    rather than the expected SIGSEGV.
186  */
187 int i915_gem_mmap_gtt_version(void)
188 {
189 	return 4;
190 }
191 
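/*
 * compute_partial_view() selects which chunk of the object to bind into the
 * GGTT when the whole object cannot be mapped. A worked example with purely
 * illustrative numbers, assuming 4KiB pages: for an untiled 16MiB object
 * (4096 pages) faulting on page 2050 with the default 1MiB chunk (256 pages),
 * partial.offset = rounddown(2050, 256) = 2048 and partial.size =
 * min(256, 4096 - 2048) = 256, i.e. a 1MiB window starting 8MiB into the
 * object. Only when the chunk covers the entire object do we fall back to a
 * normal (full) view.
 */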
192 static inline struct i915_ggtt_view
193 compute_partial_view(const struct drm_i915_gem_object *obj,
194 		     pgoff_t page_offset,
195 		     unsigned int chunk)
196 {
197 	struct i915_ggtt_view view;
198 
199 	if (i915_gem_object_is_tiled(obj))
200 		chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
201 
202 	view.type = I915_GGTT_VIEW_PARTIAL;
203 	view.partial.offset = rounddown(page_offset, chunk);
204 	view.partial.size =
205 		min_t(unsigned int, chunk,
206 		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
207 
	/* If the partial view covers the entire object, just create a normal VMA. */
209 	if (chunk >= obj->base.size >> PAGE_SHIFT)
210 		view.type = I915_GGTT_VIEW_NORMAL;
211 
212 	return view;
213 }
214 
215 static vm_fault_t i915_error_to_vmf_fault(int err)
216 {
217 	switch (err) {
218 	default:
219 		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
220 		fallthrough;
221 	case -EIO: /* shmemfs failure from swap device */
222 	case -EFAULT: /* purged object */
223 	case -ENODEV: /* bad object, how did you get here! */
224 	case -ENXIO: /* unable to access backing store (on device) */
225 		return VM_FAULT_SIGBUS;
226 
227 	case -ENOMEM: /* our allocation failure */
228 		return VM_FAULT_OOM;
229 
230 	case 0:
231 	case -EAGAIN:
232 	case -ENOSPC: /* transient failure to evict? */
233 	case -ERESTARTSYS:
234 	case -EINTR:
235 	case -EBUSY:
236 		/*
237 		 * EBUSY is ok: this just means that another thread
238 		 * already did the job.
239 		 */
240 		return VM_FAULT_NOPAGE;
241 	}
242 }
243 
244 static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
245 {
246 	struct vm_area_struct *area = vmf->vma;
247 	struct i915_mmap_offset *mmo = area->vm_private_data;
248 	struct drm_i915_gem_object *obj = mmo->obj;
249 	resource_size_t iomap;
250 	int err;
251 
252 	/* Sanity check that we allow writing into this object */
253 	if (unlikely(i915_gem_object_is_readonly(obj) &&
254 		     area->vm_flags & VM_WRITE))
255 		return VM_FAULT_SIGBUS;
256 
257 	if (i915_gem_object_lock_interruptible(obj, NULL))
258 		return VM_FAULT_NOPAGE;
259 
260 	err = i915_gem_object_pin_pages(obj);
261 	if (err)
262 		goto out;
263 
264 	iomap = -1;
265 	if (!i915_gem_object_has_struct_page(obj)) {
266 		iomap = obj->mm.region->iomap.base;
267 		iomap -= obj->mm.region->region.start;
268 	}
269 
270 	/* PTEs are revoked in obj->ops->put_pages() */
271 	err = remap_io_sg(area,
272 			  area->vm_start, area->vm_end - area->vm_start,
273 			  obj->mm.pages->sgl, iomap);
274 
275 	if (area->vm_flags & VM_WRITE) {
276 		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
277 		obj->mm.dirty = true;
278 	}
279 
280 	i915_gem_object_unpin_pages(obj);
281 
282 out:
283 	i915_gem_object_unlock(obj);
284 	return i915_error_to_vmf_fault(err);
285 }
286 
287 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
288 {
289 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
290 	struct vm_area_struct *area = vmf->vma;
291 	struct i915_mmap_offset *mmo = area->vm_private_data;
292 	struct drm_i915_gem_object *obj = mmo->obj;
293 	struct drm_device *dev = obj->base.dev;
294 	struct drm_i915_private *i915 = to_i915(dev);
295 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
296 	struct i915_ggtt *ggtt = &i915->ggtt;
297 	bool write = area->vm_flags & VM_WRITE;
298 	struct i915_gem_ww_ctx ww;
299 	intel_wakeref_t wakeref;
300 	struct i915_vma *vma;
301 	pgoff_t page_offset;
302 	int srcu;
303 	int ret;
304 
305 	/* We don't use vmf->pgoff since that has the fake offset */
306 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
307 
308 	trace_i915_gem_object_fault(obj, page_offset, true, write);
309 
310 	wakeref = intel_runtime_pm_get(rpm);
311 
312 	i915_gem_ww_ctx_init(&ww, true);
313 retry:
314 	ret = i915_gem_object_lock(obj, &ww);
315 	if (ret)
316 		goto err_rpm;
317 
318 	/* Sanity check that we allow writing into this object */
319 	if (i915_gem_object_is_readonly(obj) && write) {
320 		ret = -EFAULT;
321 		goto err_rpm;
322 	}
323 
324 	ret = i915_gem_object_pin_pages(obj);
325 	if (ret)
326 		goto err_rpm;
327 
328 	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
329 	if (ret)
330 		goto err_pages;
331 
332 	/* Now pin it into the GTT as needed */
333 	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
334 					  PIN_MAPPABLE |
335 					  PIN_NONBLOCK /* NOWARN */ |
336 					  PIN_NOEVICT);
337 	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
		/* Use a partial view if the object is bigger than the available space */
339 		struct i915_ggtt_view view =
340 			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
341 		unsigned int flags;
342 
343 		flags = PIN_MAPPABLE | PIN_NOSEARCH;
344 		if (view.type == I915_GGTT_VIEW_NORMAL)
345 			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
346 
347 		/*
348 		 * Userspace is now writing through an untracked VMA, abandon
349 		 * all hope that the hardware is able to track future writes.
350 		 */
351 
352 		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
353 		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
354 			flags = PIN_MAPPABLE;
355 			view.type = I915_GGTT_VIEW_PARTIAL;
356 			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
357 		}
358 
359 		/* The entire mappable GGTT is pinned? Unexpected! */
360 		GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
361 	}
362 	if (IS_ERR(vma)) {
363 		ret = PTR_ERR(vma);
364 		goto err_reset;
365 	}
366 
367 	/* Access to snoopable pages through the GTT is incoherent. */
368 	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
369 		ret = -EFAULT;
370 		goto err_unpin;
371 	}
372 
373 	ret = i915_vma_pin_fence(vma);
374 	if (ret)
375 		goto err_unpin;
376 
377 	/* Finally, remap it using the new GTT offset */
378 	ret = remap_io_mapping(area,
379 			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
380 			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
381 			       min_t(u64, vma->size, area->vm_end - area->vm_start),
382 			       &ggtt->iomap);
383 	if (ret)
384 		goto err_fence;
385 
386 	assert_rpm_wakelock_held(rpm);
387 
388 	/* Mark as being mmapped into userspace for later revocation */
389 	mutex_lock(&i915->ggtt.vm.mutex);
390 	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
391 		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
392 	mutex_unlock(&i915->ggtt.vm.mutex);
393 
394 	/* Track the mmo associated with the fenced vma */
395 	vma->mmo = mmo;
396 
397 	if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
398 		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
399 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
400 
401 	if (write) {
402 		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
403 		i915_vma_set_ggtt_write(vma);
404 		obj->mm.dirty = true;
405 	}
406 
407 err_fence:
408 	i915_vma_unpin_fence(vma);
409 err_unpin:
410 	__i915_vma_unpin(vma);
411 err_reset:
412 	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
413 err_pages:
414 	i915_gem_object_unpin_pages(obj);
415 err_rpm:
416 	if (ret == -EDEADLK) {
417 		ret = i915_gem_ww_ctx_backoff(&ww);
418 		if (!ret)
419 			goto retry;
420 	}
421 	i915_gem_ww_ctx_fini(&ww);
422 	intel_runtime_pm_put(rpm, wakeref);
423 	return i915_error_to_vmf_fault(ret);
424 }
425 
426 static int
427 vm_access(struct vm_area_struct *area, unsigned long addr,
428 	  void *buf, int len, int write)
429 {
430 	struct i915_mmap_offset *mmo = area->vm_private_data;
431 	struct drm_i915_gem_object *obj = mmo->obj;
432 	struct i915_gem_ww_ctx ww;
433 	void *vaddr;
434 	int err = 0;
435 
436 	if (i915_gem_object_is_readonly(obj) && write)
437 		return -EACCES;
438 
439 	addr -= area->vm_start;
	if (range_overflows_t(u64, addr, len, obj->base.size))
441 		return -EINVAL;
442 
443 	i915_gem_ww_ctx_init(&ww, true);
444 retry:
445 	err = i915_gem_object_lock(obj, &ww);
446 	if (err)
447 		goto out;
448 
449 	/* As this is primarily for debugging, let's focus on simplicity */
450 	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
451 	if (IS_ERR(vaddr)) {
452 		err = PTR_ERR(vaddr);
453 		goto out;
454 	}
455 
456 	if (write) {
457 		memcpy(vaddr + addr, buf, len);
458 		__i915_gem_object_flush_map(obj, addr, len);
459 	} else {
460 		memcpy(buf, vaddr + addr, len);
461 	}
462 
463 	i915_gem_object_unpin_map(obj);
464 out:
465 	if (err == -EDEADLK) {
466 		err = i915_gem_ww_ctx_backoff(&ww);
467 		if (!err)
468 			goto retry;
469 	}
470 	i915_gem_ww_ctx_fini(&ww);
471 
472 	if (err)
473 		return err;
474 
475 	return len;
476 }
477 
478 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
479 {
480 	struct i915_vma *vma;
481 
482 	GEM_BUG_ON(!obj->userfault_count);
483 
484 	for_each_ggtt_vma(vma, obj)
485 		i915_vma_revoke_mmap(vma);
486 
487 	GEM_BUG_ON(obj->userfault_count);
488 }
489 
490 /*
491  * It is vital that we remove the page mapping if we have mapped a tiled
492  * object through the GTT and then lose the fence register due to
493  * resource pressure. Similarly if the object has been moved out of the
494  * aperture, than pages mapped into userspace must be revoked. Removing the
495  * mapping will then trigger a page fault on the next user access, allowing
496  * fixup by vm_fault_gtt().
497  */
498 void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
499 {
500 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
501 	intel_wakeref_t wakeref;
502 
503 	/*
504 	 * Serialisation between user GTT access and our code depends upon
505 	 * revoking the CPU's PTE whilst the mutex is held. The next user
506 	 * pagefault then has to wait until we release the mutex.
507 	 *
508 	 * Note that RPM complicates somewhat by adding an additional
509 	 * requirement that operations to the GGTT be made holding the RPM
510 	 * wakeref.
511 	 */
512 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
513 	mutex_lock(&i915->ggtt.vm.mutex);
514 
515 	if (!obj->userfault_count)
516 		goto out;
517 
518 	__i915_gem_object_release_mmap_gtt(obj);
519 
520 	/*
521 	 * Ensure that the CPU's PTE are revoked and there are not outstanding
522 	 * memory transactions from userspace before we return. The TLB
523 	 * flushing implied above by changing the PTE above *should* be
524 	 * sufficient, an extra barrier here just provides us with a bit
525 	 * of paranoid documentation about our requirement to serialise
526 	 * memory writes before touching registers / GSM.
527 	 */
528 	wmb();
529 
530 out:
531 	mutex_unlock(&i915->ggtt.vm.mutex);
532 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
533 }
534 
535 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
536 {
537 	struct i915_mmap_offset *mmo, *mn;
538 
539 	spin_lock(&obj->mmo.lock);
540 	rbtree_postorder_for_each_entry_safe(mmo, mn,
541 					     &obj->mmo.offsets, offset) {
542 		/*
543 		 * vma_node_unmap for GTT mmaps handled already in
544 		 * __i915_gem_object_release_mmap_gtt
545 		 */
546 		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
547 			continue;
548 
549 		spin_unlock(&obj->mmo.lock);
550 		drm_vma_node_unmap(&mmo->vma_node,
551 				   obj->base.dev->anon_inode->i_mapping);
552 		spin_lock(&obj->mmo.lock);
553 	}
554 	spin_unlock(&obj->mmo.lock);
555 }
556 
557 static struct i915_mmap_offset *
558 lookup_mmo(struct drm_i915_gem_object *obj,
559 	   enum i915_mmap_type mmap_type)
560 {
561 	struct rb_node *rb;
562 
563 	spin_lock(&obj->mmo.lock);
564 	rb = obj->mmo.offsets.rb_node;
565 	while (rb) {
566 		struct i915_mmap_offset *mmo =
567 			rb_entry(rb, typeof(*mmo), offset);
568 
569 		if (mmo->mmap_type == mmap_type) {
570 			spin_unlock(&obj->mmo.lock);
571 			return mmo;
572 		}
573 
574 		if (mmo->mmap_type < mmap_type)
575 			rb = rb->rb_right;
576 		else
577 			rb = rb->rb_left;
578 	}
579 	spin_unlock(&obj->mmo.lock);
580 
581 	return NULL;
582 }
583 
584 static struct i915_mmap_offset *
585 insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
586 {
587 	struct rb_node *rb, **p;
588 
589 	spin_lock(&obj->mmo.lock);
590 	rb = NULL;
591 	p = &obj->mmo.offsets.rb_node;
592 	while (*p) {
593 		struct i915_mmap_offset *pos;
594 
595 		rb = *p;
596 		pos = rb_entry(rb, typeof(*pos), offset);
597 
598 		if (pos->mmap_type == mmo->mmap_type) {
599 			spin_unlock(&obj->mmo.lock);
600 			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
601 					      &mmo->vma_node);
602 			kfree(mmo);
603 			return pos;
604 		}
605 
606 		if (pos->mmap_type < mmo->mmap_type)
607 			p = &rb->rb_right;
608 		else
609 			p = &rb->rb_left;
610 	}
611 	rb_link_node(&mmo->offset, rb, p);
612 	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
613 	spin_unlock(&obj->mmo.lock);
614 
615 	return mmo;
616 }
617 
618 static struct i915_mmap_offset *
619 mmap_offset_attach(struct drm_i915_gem_object *obj,
620 		   enum i915_mmap_type mmap_type,
621 		   struct drm_file *file)
622 {
623 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
624 	struct i915_mmap_offset *mmo;
625 	int err;
626 
627 	GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);
628 
629 	mmo = lookup_mmo(obj, mmap_type);
630 	if (mmo)
631 		goto out;
632 
633 	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
634 	if (!mmo)
635 		return ERR_PTR(-ENOMEM);
636 
637 	mmo->obj = obj;
638 	mmo->mmap_type = mmap_type;
639 	drm_vma_node_reset(&mmo->vma_node);
640 
641 	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
642 				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
643 	if (likely(!err))
644 		goto insert;
645 
646 	/* Attempt to reap some mmap space from dead objects */
647 	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
648 	if (err)
649 		goto err;
650 
651 	i915_gem_drain_freed_objects(i915);
652 	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
653 				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
654 	if (err)
655 		goto err;
656 
657 insert:
658 	mmo = insert_mmo(obj, mmo);
659 	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
660 out:
661 	if (file)
662 		drm_vma_node_allow(&mmo->vma_node, file);
663 	return mmo;
664 
665 err:
666 	kfree(mmo);
667 	return ERR_PTR(err);
668 }
669 
670 static int
671 __assign_mmap_offset(struct drm_i915_gem_object *obj,
672 		     enum i915_mmap_type mmap_type,
673 		     u64 *offset, struct drm_file *file)
674 {
675 	struct i915_mmap_offset *mmo;
676 
677 	if (i915_gem_object_never_mmap(obj))
678 		return -ENODEV;
679 
680 	if (obj->ops->mmap_offset)  {
681 		*offset = obj->ops->mmap_offset(obj);
682 		return 0;
683 	}
684 
685 	if (mmap_type != I915_MMAP_TYPE_GTT &&
686 	    !i915_gem_object_has_struct_page(obj) &&
687 	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
688 		return -ENODEV;
689 
690 	mmo = mmap_offset_attach(obj, mmap_type, file);
691 	if (IS_ERR(mmo))
692 		return PTR_ERR(mmo);
693 
694 	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
695 	return 0;
696 }
697 
698 static int
699 __assign_mmap_offset_handle(struct drm_file *file,
700 			    u32 handle,
701 			    enum i915_mmap_type mmap_type,
702 			    u64 *offset)
703 {
704 	struct drm_i915_gem_object *obj;
705 	int err;
706 
707 	obj = i915_gem_object_lookup(file, handle);
708 	if (!obj)
709 		return -ENOENT;
710 
711 	err = __assign_mmap_offset(obj, mmap_type, offset, file);
712 	i915_gem_object_put(obj);
713 	return err;
714 }
715 
716 int
717 i915_gem_dumb_mmap_offset(struct drm_file *file,
718 			  struct drm_device *dev,
719 			  u32 handle,
720 			  u64 *offset)
721 {
722 	enum i915_mmap_type mmap_type;
723 
724 	if (boot_cpu_has(X86_FEATURE_PAT))
725 		mmap_type = I915_MMAP_TYPE_WC;
726 	else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
727 		return -ENODEV;
728 	else
729 		mmap_type = I915_MMAP_TYPE_GTT;
730 
731 	return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
732 }
733 
734 /**
735  * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
736  * @dev: DRM device
737  * @data: GTT mapping ioctl data
738  * @file: GEM object info
739  *
740  * Simply returns the fake offset to userspace so it can mmap it.
741  * The mmap call will end up in drm_gem_mmap(), which will set things
742  * up so we can get faults in the handler above.
743  *
744  * The fault handler will take care of binding the object into the GTT
745  * (since it may have been evicted to make room for something), allocating
746  * a fence register, and mapping the appropriate aperture address into
747  * userspace.
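 *
 * An illustrative userspace sketch of this path (not kernel code), assuming
 * libdrm's drmIoctl() and the definitions from the i915 uapi header, where
 * fd, handle and length are the caller's own variables:
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *	void *ptr = MAP_FAILED;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg) == 0)
 *		ptr = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);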
748  */
749 int
750 i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
751 			   struct drm_file *file)
752 {
753 	struct drm_i915_private *i915 = to_i915(dev);
754 	struct drm_i915_gem_mmap_offset *args = data;
755 	enum i915_mmap_type type;
756 	int err;
757 
758 	/*
759 	 * Historically we failed to check args.pad and args.offset
760 	 * and so we cannot use those fields for user input and we cannot
761 	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
762 	 * may be feeding in garbage in those fields.
763 	 *
764 	 * if (args->pad) return -EINVAL; is verbotten!
765 	 */
766 
767 	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
768 				   NULL, 0, NULL);
769 	if (err)
770 		return err;
771 
772 	switch (args->flags) {
773 	case I915_MMAP_OFFSET_GTT:
774 		if (!i915_ggtt_has_aperture(&i915->ggtt))
775 			return -ENODEV;
776 		type = I915_MMAP_TYPE_GTT;
777 		break;
778 
779 	case I915_MMAP_OFFSET_WC:
780 		if (!boot_cpu_has(X86_FEATURE_PAT))
781 			return -ENODEV;
782 		type = I915_MMAP_TYPE_WC;
783 		break;
784 
785 	case I915_MMAP_OFFSET_WB:
786 		type = I915_MMAP_TYPE_WB;
787 		break;
788 
789 	case I915_MMAP_OFFSET_UC:
790 		if (!boot_cpu_has(X86_FEATURE_PAT))
791 			return -ENODEV;
792 		type = I915_MMAP_TYPE_UC;
793 		break;
794 
795 	default:
796 		return -EINVAL;
797 	}
798 
799 	return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
800 }
801 
802 static void vm_open(struct vm_area_struct *vma)
803 {
804 	struct i915_mmap_offset *mmo = vma->vm_private_data;
805 	struct drm_i915_gem_object *obj = mmo->obj;
806 
807 	GEM_BUG_ON(!obj);
808 	i915_gem_object_get(obj);
809 }
810 
811 static void vm_close(struct vm_area_struct *vma)
812 {
813 	struct i915_mmap_offset *mmo = vma->vm_private_data;
814 	struct drm_i915_gem_object *obj = mmo->obj;
815 
816 	GEM_BUG_ON(!obj);
817 	i915_gem_object_put(obj);
818 }
819 
820 static const struct vm_operations_struct vm_ops_gtt = {
821 	.fault = vm_fault_gtt,
822 	.access = vm_access,
823 	.open = vm_open,
824 	.close = vm_close,
825 };
826 
827 static const struct vm_operations_struct vm_ops_cpu = {
828 	.fault = vm_fault_cpu,
829 	.access = vm_access,
830 	.open = vm_open,
831 	.close = vm_close,
832 };
833 
834 static int singleton_release(struct inode *inode, struct file *file)
835 {
836 	struct drm_i915_private *i915 = file->private_data;
837 
838 	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
839 	drm_dev_put(&i915->drm);
840 
841 	return 0;
842 }
843 
844 static const struct file_operations singleton_fops = {
845 	.owner = THIS_MODULE,
846 	.release = singleton_release,
847 };
848 
849 static struct file *mmap_singleton(struct drm_i915_private *i915)
850 {
851 	struct file *file;
852 
853 	rcu_read_lock();
854 	file = READ_ONCE(i915->gem.mmap_singleton);
855 	if (file && !get_file_rcu(file))
856 		file = NULL;
857 	rcu_read_unlock();
858 	if (file)
859 		return file;
860 
861 	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
862 	if (IS_ERR(file))
863 		return file;
864 
865 	/* Everyone shares a single global address space */
866 	file->f_mapping = i915->drm.anon_inode->i_mapping;
867 
868 	smp_store_mb(i915->gem.mmap_singleton, file);
869 	drm_dev_get(&i915->drm);
870 
871 	return file;
872 }
873 
874 /*
875  * This overcomes the limitation in drm_gem_mmap's assignment of a
876  * drm_gem_object as the vma->vm_private_data. Since we need to
877  * be able to resolve multiple mmap offsets which could be tied
878  * to a single gem object.
879  */
880 int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
881 {
882 	struct drm_vma_offset_node *node;
883 	struct drm_file *priv = filp->private_data;
884 	struct drm_device *dev = priv->minor->dev;
885 	struct drm_i915_gem_object *obj = NULL;
886 	struct i915_mmap_offset *mmo = NULL;
887 	struct file *anon;
888 
889 	if (drm_dev_is_unplugged(dev))
890 		return -ENODEV;
891 
892 	rcu_read_lock();
893 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
894 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
895 						  vma->vm_pgoff,
896 						  vma_pages(vma));
897 	if (node && drm_vma_node_is_allowed(node, priv)) {
898 		/*
899 		 * Skip 0-refcnted objects as it is in the process of being
900 		 * destroyed and will be invalid when the vma manager lock
901 		 * is released.
902 		 */
903 		if (!node->driver_private) {
904 			mmo = container_of(node, struct i915_mmap_offset, vma_node);
905 			obj = i915_gem_object_get_rcu(mmo->obj);
906 
907 			GEM_BUG_ON(obj && obj->ops->mmap_ops);
908 		} else {
909 			obj = i915_gem_object_get_rcu
910 				(container_of(node, struct drm_i915_gem_object,
911 					      base.vma_node));
912 
913 			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
914 		}
915 	}
916 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
917 	rcu_read_unlock();
918 	if (!obj)
919 		return node ? -EACCES : -EINVAL;
920 
921 	if (i915_gem_object_is_readonly(obj)) {
922 		if (vma->vm_flags & VM_WRITE) {
923 			i915_gem_object_put(obj);
924 			return -EINVAL;
925 		}
926 		vma->vm_flags &= ~VM_MAYWRITE;
927 	}
928 
929 	anon = mmap_singleton(to_i915(dev));
930 	if (IS_ERR(anon)) {
931 		i915_gem_object_put(obj);
932 		return PTR_ERR(anon);
933 	}
934 
935 	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
936 
937 	if (i915_gem_object_has_iomem(obj))
938 		vma->vm_flags |= VM_IO;
939 
940 	/*
941 	 * We keep the ref on mmo->obj, not vm_file, but we require
942 	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
943 	 * Our userspace is accustomed to having per-file resource cleanup
944 	 * (i.e. contexts, objects and requests) on their close(fd), which
945 	 * requires avoiding extraneous references to their filp, hence why
946 	 * we prefer to use an anonymous file for their mmaps.
947 	 */
948 	vma_set_file(vma, anon);
949 	/* Drop the initial creation reference, the vma is now holding one. */
950 	fput(anon);
951 
952 	if (obj->ops->mmap_ops) {
953 		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
954 		vma->vm_ops = obj->ops->mmap_ops;
955 		vma->vm_private_data = node->driver_private;
956 		return 0;
957 	}
958 
959 	vma->vm_private_data = mmo;
960 
961 	switch (mmo->mmap_type) {
962 	case I915_MMAP_TYPE_WC:
963 		vma->vm_page_prot =
964 			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
965 		vma->vm_ops = &vm_ops_cpu;
966 		break;
967 
968 	case I915_MMAP_TYPE_WB:
969 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
970 		vma->vm_ops = &vm_ops_cpu;
971 		break;
972 
973 	case I915_MMAP_TYPE_UC:
974 		vma->vm_page_prot =
975 			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
976 		vma->vm_ops = &vm_ops_cpu;
977 		break;
978 
979 	case I915_MMAP_TYPE_GTT:
980 		vma->vm_page_prot =
981 			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
982 		vma->vm_ops = &vm_ops_gtt;
983 		break;
984 	}
985 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
986 
987 	return 0;
988 }
989 
990 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
991 #include "selftests/i915_gem_mman.c"
992 #endif
993