/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 */

#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

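/*
 * Bookkeeping for the process address space behind a userptr object:
 * one i915_mm_struct per (device, mm_struct) pair, hashed in
 * dev_priv->mm_structs and shared (via kref) by every userptr object
 * created against that mm. The final unref is punted to a worker, see
 * __i915_mm_struct_free().
 */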
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

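/*
 * One mmu_notifier per i915_mm_struct: the interval tree tracks every
 * active userptr object in that address space by its user virtual
 * range, so an invalidation only walks the objects it overlaps. The
 * tree is protected by the spinlock, not struct_mutex.
 */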
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root_cached objects;
	struct i915_mm_struct *mm;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
};

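/* Both helpers require mo->mn->lock to be held by the caller. */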
static void add_object(struct i915_mmu_object *mo)
{
	GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
	interval_tree_insert(&mo->it, &mo->mn->objects);
}

static void del_object(struct i915_mmu_object *mo)
{
	if (RB_EMPTY_NODE(&mo->it.rb))
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	RB_CLEAR_NODE(&mo->it.rb);
}

static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
	struct i915_mmu_object *mo = obj->userptr.mmu_object;

	/*
	 * During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
	if (!mo)
		return;

	spin_lock(&mo->mn->lock);
	if (value)
		add_object(mo);
	else
		del_object(mo);
	spin_unlock(&mo->mn->lock);
}

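/*
 * Flush any userptr object overlapping the invalidated range: take a
 * reference (unless the object is already being freed), drop the
 * spinlock, then unbind the object and release its pages. For
 * non-blockable invalidations we bail out with -EAGAIN instead of
 * waiting.
 */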
static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
				  const struct mmu_notifier_range *range)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it;
	unsigned long end;
	int ret = 0;

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end = range->end - 1;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, range->start, end);
	while (it) {
		struct drm_i915_gem_object *obj;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			break;
		}

		/*
		 * The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!kref_get_unless_zero(&obj->base.refcount)) {
			it = interval_tree_iter_next(it, range->start, end);
			continue;
		}
		spin_unlock(&mn->lock);

		ret = i915_gem_object_unbind(obj,
					     I915_GEM_OBJECT_UNBIND_ACTIVE |
					     I915_GEM_OBJECT_UNBIND_BARRIER);
		if (ret == 0)
			ret = __i915_gem_object_put_pages(obj);
		i915_gem_object_put(obj);
		if (ret)
			return ret;

		spin_lock(&mn->lock);

		/*
		 * As we do not (yet) protect the mmu from concurrent insertion
		 * over this range, there is no guarantee that this search will
		 * terminate given a pathologic workload.
		 */
		it = interval_tree_iter_first(&mn->objects, range->start, end);
	}
	spin_unlock(&mn->lock);

	return ret;
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT_CACHED;
	mn->mm = mm;

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = fetch_and_zero(&obj->userptr.mmu_object);
	if (!mo)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);
}

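/*
 * Lazily create and register the mmu_notifier for this mm. We allocate
 * optimistically before taking the locks; if another thread wins the
 * race and installs a notifier first, our allocation (and any error
 * from it) is quietly discarded in favour of the winner's.
 */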
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int err = 0;

	mn = mm->mn;
	if (mn)
		return mn;

	mn = i915_mmu_notifier_create(mm);
	if (IS_ERR(mn))
		err = PTR_ERR(mn);

	mmap_write_lock(mm->mm);
	mutex_lock(&mm->i915->mm_lock);
	if (mm->mn == NULL && !err) {
		/* Protected by mmap_lock (write-lock) */
		err = __mmu_notifier_register(&mn->mn, mm->mm);
		if (!err) {
			/* Protected by mm_lock */
			mm->mn = fetch_and_zero(&mn);
		}
	} else if (mm->mn) {
		/*
		 * Someone else raced and successfully installed the mmu
		 * notifier, we can cancel our own errors.
		 */
		err = 0;
	}
	mutex_unlock(&mm->i915->mm_lock);
	mmap_write_unlock(mm->mm);

	if (mn && !IS_ERR(mn))
		kfree(mn);

	return err ? ERR_PTR(err) : mm->mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (!mo)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	RB_CLEAR_NODE(&mo->it.rb);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	kfree(mn);
}

#else

static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct_mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		mmgrab(current->mm);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else {
		kref_get(&mm->kref);
	}

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);

	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

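/*
 * Final kref release: called via kref_put_mutex() with mm->i915->mm_lock
 * already held. We unhash the struct, drop the lock, and then punt the
 * actual teardown (notifier unregister, mmdrop) to a worker; see the
 * comment in i915_gem_userptr_init__mm_struct() for why.
 */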
static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	queue_work(mm->i915->mm.userptr_wq, &mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

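/*
 * Build the sg_table for a fully pinned page array and map it for DMA.
 * If the DMA mapping fails with large segments (e.g. under swiotlb
 * limits), retry with at most one page per segment before giving up.
 */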
static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
			       struct page **pvec, unsigned long num_pages)
{
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

alloc_table:
	ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
					  0, num_pages << PAGE_SHIFT,
					  max_segment,
					  GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		kfree(st);
		return ERR_PTR(ret);
	}

	sg_page_sizes = i915_sg_page_sizes(st->sgl);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return st;
}

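/*
 * Worker context for the slow path: pin the user pages with
 * pin_user_pages_remote() under mmap_read_lock, without holding any of
 * our own locks, then publish the result under obj->mm.lock -- but only
 * if obj->userptr.work still points at us, i.e. we have not been
 * cancelled or superseded in the meantime.
 */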
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const unsigned long npages = obj->base.size >> PAGE_SHIFT;
	unsigned long pinned;
	struct page **pvec;
	int ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;
		int locked = 0;

		if (!i915_gem_object_is_readonly(obj))
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
			while (pinned < npages) {
				if (!locked) {
					mmap_read_lock(mm);
					locked = 1;
				}
				ret = pin_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, &locked);
				if (ret < 0)
					break;

				pinned += ret;
			}
			if (locked)
				mmap_read_unlock(mm);
			mmput(mm);
		}
	}

	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_alloc_pages(obj, pvec,
							       npages);
			if (!IS_ERR(pages)) {
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
		if (IS_ERR(pages))
			__i915_gem_userptr_set_active(obj, false);
	}
	mutex_unlock(&obj->mm.lock);

	unpin_user_pages(pvec, pinned);
	kvfree(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}


static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_lock, and we have
	 * a strict lock ordering of mmap_lock, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return ERR_PTR(-ENOMEM);

	obj->userptr.work = &work->work;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

	return ERR_PTR(-EAGAIN);
}

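/*
 * Acquire the backing pages for the object. Three outcomes are possible:
 * the pages are pinned directly on the fast path (same mm and
 * pin_user_pages_fast_only() succeeds in full), the slow-path worker is
 * scheduled and we return -EAGAIN so the caller retries, or a previously
 * scheduled worker has finished with an error, which we now report.
 */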
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct mm_struct *mm = obj->userptr.mm->mm;
	struct page **pvec;
	struct sg_table *pages;
	bool active;
	int pinned;
	unsigned int gup_flags = 0;

	/* Should userspace engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	if (obj->userptr.work) {
		/* active flag should still be held for the pending work */
		if (IS_ERR(obj->userptr.work))
			return PTR_ERR(obj->userptr.work);
		else
			return -EAGAIN;
	}

	pvec = NULL;
	pinned = 0;

	if (mm == current->mm) {
		/* Try the fast path first; if the allocation fails, we
		 * defer to the worker.
		 */
		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
				      GFP_KERNEL |
				      __GFP_NORETRY |
				      __GFP_NOWARN);
		/*
		 * Using pin_user_pages_fast_only() with a read-only
		 * access is questionable. A read-only page may be
		 * COW-broken, and then this might end up giving
		 * the wrong side of the COW.
		 *
		 * We may or may not care.
		 */
		if (pvec) {
			if (!i915_gem_object_is_readonly(obj))
				gup_flags |= FOLL_WRITE;
			pinned = pin_user_pages_fast_only(obj->userptr.ptr,
							  num_pages, gup_flags,
							  pvec);
		}
	}

	active = false;
	if (pinned < 0) {
		pages = ERR_PTR(pinned);
		pinned = 0;
	} else if (pinned < num_pages) {
		pages = __i915_gem_userptr_get_pages_schedule(obj);
		active = pages == ERR_PTR(-EAGAIN);
	} else {
		pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
		active = !IS_ERR(pages);
	}
	if (active)
		__i915_gem_userptr_set_active(obj, true);

	if (IS_ERR(pages))
		unpin_user_pages(pvec, pinned);
	kvfree(pvec);

	return PTR_ERR_OR_ZERO(pages);
}


static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	/* Cancel any inflight work and force them to restart their gup */
	obj->userptr.work = NULL;
	__i915_gem_userptr_set_active(obj, false);
	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we know
	 * that the object will never have been written to.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 *
			 * However...!
			 *
			 * The mmu-notifier can be invalidated for a
			 * migrate_page that is already holding the lock
			 * on the page. Such a try_to_unmap() will result
			 * in us calling put_pages() and so recursively try
			 * to lock the page. We avoid that deadlock with
			 * a trylock_page() and in exchange we risk missing
			 * some page dirtying.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
		unpin_user_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}


static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_ASYNC_CANCEL,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
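/*
 * Illustrative userspace sketch only (not part of this file), assuming a
 * libdrm-style device fd and a page-aligned malloc'ed buffer:
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (uintptr_t)ptr,
 *		.user_size = size,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
 *		return -errno;
 *	handle = arg.handle;
 */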
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw that has
		 * neither LLC nor working snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	/*
	 * XXX: There is a prevalence of the assumption that we fit the
	 * object's page count inside a 32bit _signed_ variable. Let's document
	 * this and catch if we ever need to fix it. In the meantime, if you do
	 * spot such a local variable, please consider fixing!
	 *
	 * Aside from our own locals (for which we have no excuse!):
	 * - sg_table embeds unsigned int for num_pages
	 * - get_user_pages*() mixes ints with longs
	 */

	if (args->user_size >> PAGE_SHIFT > INT_MAX)
		return -E2BIG;

	if (overflows_type(args->user_size, obj->base.size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		if (!dev_priv->gt.vm->has_read_only)
			return -ENODEV;
	}

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class);
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);

	dev_priv->mm.userptr_wq =
		alloc_workqueue("i915-userptr-acquire",
				WQ_HIGHPRI | WQ_UNBOUND,
				0);
	if (!dev_priv->mm.userptr_wq)
		return -ENOMEM;

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->mm.userptr_wq);
}