/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 */

#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root_cached objects;
	struct i915_mm_struct *mm;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
};

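/*
 * Each userptr object is tracked as an inclusive [start, last] interval in
 * the per-mm interval tree, so that an invalidation of a range of the user
 * address space can be mapped back to the GEM objects overlapping it.
 */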
static void add_object(struct i915_mmu_object *mo)
{
	GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
	interval_tree_insert(&mo->it, &mo->mn->objects);
}

static void del_object(struct i915_mmu_object *mo)
{
	if (RB_EMPTY_NODE(&mo->it.rb))
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	RB_CLEAR_NODE(&mo->it.rb);
}

static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
	struct i915_mmu_object *mo = obj->userptr.mmu_object;

	/*
	 * During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
	if (!mo)
		return;

	spin_lock(&mo->mn->lock);
	if (value)
		add_object(mo);
	else
		del_object(mo);
	spin_unlock(&mo->mn->lock);
}

static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
				  const struct mmu_notifier_range *range)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it;
	unsigned long end;
	int ret = 0;

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end = range->end - 1;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, range->start, end);
	while (it) {
		struct drm_i915_gem_object *obj;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			break;
		}

		/*
		 * The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!kref_get_unless_zero(&obj->base.refcount)) {
			it = interval_tree_iter_next(it, range->start, end);
			continue;
		}
		spin_unlock(&mn->lock);

		ret = i915_gem_object_unbind(obj,
					     I915_GEM_OBJECT_UNBIND_ACTIVE |
					     I915_GEM_OBJECT_UNBIND_BARRIER);
		if (ret == 0)
			ret = __i915_gem_object_put_pages(obj);
		i915_gem_object_put(obj);
		if (ret)
			return ret;

		spin_lock(&mn->lock);

		/*
		 * As we do not (yet) protect the mmu from concurrent insertion
		 * over this range, there is no guarantee that this search will
		 * terminate given a pathological workload.
		 */
		it = interval_tree_iter_first(&mn->objects, range->start, end);
	}
	spin_unlock(&mn->lock);

	return ret;
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT_CACHED;
	mn->mm = mm;

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = fetch_and_zero(&obj->userptr.mmu_object);
	if (!mo)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);
}

static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int err = 0;

	mn = mm->mn;
	if (mn)
		return mn;

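	/*
	 * Allocate the notifier optimistically before taking mmap_sem and
	 * mm_lock; if another thread has already installed one, or our
	 * registration fails, the surplus allocation is simply freed below.
	 */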
	mn = i915_mmu_notifier_create(mm);
	if (IS_ERR(mn))
		err = PTR_ERR(mn);

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if (mm->mn == NULL && !err) {
		/* Protected by mmap_sem (write-lock) */
		err = __mmu_notifier_register(&mn->mn, mm->mm);
		if (!err) {
			/* Protected by mm_lock */
			mm->mn = fetch_and_zero(&mn);
		}
	} else if (mm->mn) {
		/*
		 * Someone else raced and successfully installed the mmu
		 * notifier, we can cancel our own errors.
		 */
		err = 0;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	if (mn && !IS_ERR(mn))
		kfree(mn);

	return err ? ERR_PTR(err) : mm->mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (!mo)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	RB_CLEAR_NODE(&mo->it.rb);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	kfree(mn);
}

#else

static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

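/*
 * An i915_mm_struct wraps a process' mm_struct so that all userptr objects
 * created against the same mm share a single wrapper (and hence a single
 * mmu_notifier registration). The wrappers live in a hashtable keyed by the
 * mm_struct pointer and are looked up under dev_priv->mm_lock.
 */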
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		mmgrab(current->mm);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);

	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	queue_work(mm->i915->mm.userptr_wq, &mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
			       struct page **pvec, unsigned long num_pages)
{
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

alloc_table:
	ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
					  0, num_pages << PAGE_SHIFT,
					  max_segment,
					  GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

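		/*
		 * DMA remapping may be unable to map large, coalesced
		 * segments (e.g. when bounce buffering through swiotlb),
		 * so retry once with single-page segments before giving up.
		 */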
		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		kfree(st);
		return ERR_PTR(ret);
	}

	sg_page_sizes = i915_sg_page_sizes(st->sgl);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return st;
}

static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const unsigned long npages = obj->base.size >> PAGE_SHIFT;
	unsigned long pinned;
	struct page **pvec;
	int ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;
		int locked = 0;

		if (!i915_gem_object_is_readonly(obj))
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
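			/*
			 * get_user_pages_remote() may drop mmap_sem while
			 * faulting in pages, reporting that back through
			 * @locked, so only retake the lock on an iteration
			 * where it was released.
			 */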
			while (pinned < npages) {
				if (!locked) {
					down_read(&mm->mmap_sem);
					locked = 1;
				}
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, &locked);
				if (ret < 0)
					break;

				pinned += ret;
			}
			if (locked)
				up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_alloc_pages(obj, pvec,
							       npages);
			if (!IS_ERR(pages)) {
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
		if (IS_ERR(pages))
			__i915_gem_userptr_set_active(obj, false);
	}
	mutex_unlock(&obj->mm.lock);

	release_pages(pvec, pinned);
	kvfree(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}

static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return ERR_PTR(-ENOMEM);

	obj->userptr.work = &work->work;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

	return ERR_PTR(-EAGAIN);
}

static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct mm_struct *mm = obj->userptr.mm->mm;
	struct page **pvec;
	struct sg_table *pages;
	bool active;
	int pinned;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	if (obj->userptr.work) {
		/* active flag should still be held for the pending work */
		if (IS_ERR(obj->userptr.work))
			return PTR_ERR(obj->userptr.work);
		else
			return -EAGAIN;
	}

	pvec = NULL;
	pinned = 0;

	if (mm == current->mm) {
		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
				      GFP_KERNEL |
				      __GFP_NORETRY |
				      __GFP_NOWARN);
		if (pvec) /* defer to worker if malloc fails */
			pinned = __get_user_pages_fast(obj->userptr.ptr,
						       num_pages,
						       !i915_gem_object_is_readonly(obj),
						       pvec);
	}

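	/*
	 * Three outcomes from the fast path: a hard error from gup, a
	 * partial pin (punt to the worker and report -EAGAIN), or every
	 * page pinned and ready to be assembled into an sg_table.
	 */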
	active = false;
	if (pinned < 0) {
		pages = ERR_PTR(pinned);
		pinned = 0;
	} else if (pinned < num_pages) {
		pages = __i915_gem_userptr_get_pages_schedule(obj);
		active = pages == ERR_PTR(-EAGAIN);
	} else {
		pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
		active = !IS_ERR(pages);
	}
	if (active)
		__i915_gem_userptr_set_active(obj, true);

	if (IS_ERR(pages))
		release_pages(pvec, pinned);
	kvfree(pvec);

	return PTR_ERR_OR_ZERO(pages);
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	/* Cancel any inflight work and force them to restart their gup */
	obj->userptr.work = NULL;
	__i915_gem_userptr_set_active(obj, false);
	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we know
	 * that the object will never have been written to.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 *
			 * However...!
			 *
			 * The mmu-notifier can be invalidated for a
			 * migrate_page, that is already holding the lock
			 * on the page. Such a try_to_unmap() will result
			 * in us calling put_pages() and so recursively try
			 * to lock the page. We avoid that deadlock with
			 * a trylock_page() and in exchange we risk missing
			 * some page dirtying.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
		put_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_ASYNC_CANCEL,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned to the system,
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
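/*
 * A minimal sketch of the userspace side (illustrative only; assumes a
 * page-aligned, page-sized allocation, libdrm's drmIoctl() wrapper, and a
 * hypothetical use_gem_handle() standing in for whatever consumes the new
 * GEM handle):
 *
 *	struct drm_i915_gem_userptr userptr = {
 *		.user_ptr = (uintptr_t)ptr,
 *		.user_size = size,
 *		.flags = 0,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr) == 0)
 *		use_gem_handle(userptr.handle);
 */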
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw that has
		 * neither LLC nor functional snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	/*
	 * XXX: There is a prevalence of the assumption that we fit the
	 * object's page count inside a 32bit _signed_ variable. Let's document
	 * this and catch if we ever need to fix it. In the meantime, if you do
	 * spot such a local variable, please consider fixing!
	 *
	 * Aside from our own locals (for which we have no excuse!):
	 * - sg_table embeds unsigned int for num_pages
	 * - get_user_pages*() mixed ints with longs
	 */

	if (args->user_size >> PAGE_SHIFT > INT_MAX)
		return -E2BIG;

	if (overflows_type(args->user_size, obj->base.size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		if (!dev_priv->gt.vm->has_read_only)
			return -ENODEV;
	}

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class);
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);

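	/*
	 * Dedicated workqueue used both for pinning user pages outside of
	 * our locks and for the deferred release of i915_mm_struct.
	 */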
	dev_priv->mm.userptr_wq =
		alloc_workqueue("i915-userptr-acquire",
				WQ_HIGHPRI | WQ_UNBOUND,
				0);
	if (!dev_priv->mm.userptr_wq)
		return -ENOMEM;

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->mm.userptr_wq);
}