/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 *
 * Based on amdgpu_mn, which bears the following notice:
 *
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/mmu_context.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

#ifdef CONFIG_MMU_NOTIFIER

/**
 * i915_gem_userptr_invalidate - callback to notify about mm change
 *
 * @mni: the interval notifier for the range (mm) about to be updated
 * @range: details on the invalidation
 * @cur_seq: value to pass to mmu_interval_set_seq()
 *
 * Block until outstanding GPU operations on the BO have finished; the pages
 * will be marked as accessed and potentially dirty on release.
 */
static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
{
	struct drm_i915_gem_object *obj =
		container_of(mni, struct drm_i915_gem_object, userptr.notifier);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	long r;

	if (!mmu_notifier_range_blockable(range))
		return false;

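	/*
	 * Advance the interval sequence: any submission that sampled the old
	 * value via mmu_interval_read_begin() will see the change in
	 * mmu_interval_read_retry() and re-pin its pages.
	 */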
	write_lock(&i915->mm.notifier_lock);

	mmu_interval_set_seq(mni, cur_seq);

	write_unlock(&i915->mm.notifier_lock);

	/*
	 * We don't wait when the process is exiting. This is valid
	 * because the object will be cleaned up anyway.
	 *
	 * This is also temporarily required as a hack, because we
	 * cannot currently force non-consistent batch buffers to preempt
	 * and reschedule; waiting on them would hang processes on exit.
	 */
	if (current->flags & PF_EXITING)
		return true;

	/* We will unbind on the next submission, but still hold userptr pins. */
	r = dma_resv_wait_timeout(obj->base.resv, true, false,
				  MAX_SCHEDULE_TIMEOUT);
	if (r <= 0)
		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);

	return true;
}

static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = {
	.invalidate = i915_gem_userptr_invalidate,
};

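/*
 * Register an interval notifier covering the entire userptr range, so that
 * any CPU-side change to the mapping (munmap, migration, CoW) advances the
 * notifier sequence and forces a re-pin on the next submission.
 */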
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{
	return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
					    obj->userptr.ptr, obj->base.size,
					    &i915_gem_userptr_notifier_ops);
}

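/*
 * Drop one reference on the cached page vector. The final reference unpins
 * and frees the pages; the vector is only detached under the object lock,
 * which the caller must hold.
 */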
static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{
	struct page **pvec = NULL;

	assert_object_held_shared(obj);

	if (!--obj->userptr.page_ref) {
		pvec = obj->userptr.pvec;
		obj->userptr.pvec = NULL;
	}
	GEM_BUG_ON(obj->userptr.page_ref < 0);

	if (pvec) {
		const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(pvec, num_pages);
		kvfree(pvec);
	}
}

static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	struct page **pvec;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

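	/*
	 * The page vector is populated by submit_init() before first use; if
	 * it has already been dropped (e.g. after an invalidation), ask the
	 * caller to retry the submission with -EAGAIN.
	 */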
	if (!obj->userptr.page_ref) {
		ret = -EAGAIN;
		goto err_free;
	}

	obj->userptr.page_ref++;
	pvec = obj->userptr.pvec;

alloc_table:
	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
						num_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (ret)
		goto err;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

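		/*
		 * The DMA layer may have rejected large, coalesced segments;
		 * retry once with at most page-sized segments before giving
		 * up.
		 */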
		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		goto err;
	}

	sg_page_sizes = i915_sg_dma_sizes(st->sgl);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	i915_gem_object_userptr_drop_ref(obj);
err_free:
	kfree(st);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we know
	 * that the object will never have been written to.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 *
			 * However...!
			 *
			 * The mmu-notifier can be invalidated for a
			 * migrate_page that is already holding the lock
			 * on the page. Such a try_to_unmap() will result
			 * in us calling put_pages() and so recursively try
			 * to lock the page. We avoid that deadlock with
			 * a trylock_page() and in exchange we risk missing
			 * some page dirtying.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);

	i915_gem_object_userptr_drop_ref(obj);
}

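/*
 * Evict all GPU bindings and release the cached pages, so that the next
 * submission re-pins a fresh page vector. Expects the object lock to be
 * held.
 */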
static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
		return -EBUSY;

	assert_object_held(obj);

	pages = __i915_gem_object_unset_pages(obj);
	if (!IS_ERR_OR_NULL(pages))
		i915_gem_userptr_put_pages(obj, pages);

	return err;
}

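/*
 * Pin the current user pages and prime the object for execbuf. This works
 * like a seqlock against the mmu notifier: sample the interval sequence,
 * pin the pages outside any lock, then recheck for a concurrent
 * invalidation under the object lock, returning -EAGAIN to restart the
 * submission if we raced.
 */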
int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	unsigned int gup_flags = 0;
	unsigned long notifier_seq;
	int pinned, ret;

	if (obj->userptr.notifier.mm != current->mm)
		return -EFAULT;

	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
		i915_gem_object_unlock(obj);
		return 0;
	}

	ret = i915_gem_object_userptr_unbind(obj);
	i915_gem_object_unlock(obj);
	if (ret)
		return ret;

	pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!i915_gem_object_is_readonly(obj))
		gup_flags |= FOLL_WRITE;

	pinned = ret = 0;
	while (pinned < num_pages) {
		ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
					  num_pages - pinned, gup_flags,
					  &pvec[pinned]);
		if (ret < 0)
			goto out;

		pinned += ret;
	}

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		goto out;

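	/*
	 * If pages are already cached (page_ref is non-zero), check against
	 * the sequence they were pinned under; otherwise check against the
	 * one sampled above. Either way, a mismatch means the notifier fired
	 * in the meantime and we must restart.
	 */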
	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    !obj->userptr.page_ref ? notifier_seq :
				    obj->userptr.notifier_seq)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	if (!obj->userptr.page_ref++) {
		obj->userptr.pvec = pvec;
		obj->userptr.notifier_seq = notifier_seq;
		pvec = NULL;
		ret = ____i915_gem_object_get_pages(obj);
	}

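	/*
	 * Drop the transient reference taken above; a successful get_pages()
	 * holds its own reference until the pages are released.
	 */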
	obj->userptr.page_ref--;

out_unlock:
	i915_gem_object_unlock(obj);

out:
	if (pvec) {
		unpin_user_pages(pvec, pinned);
		kvfree(pvec);
	}

	return ret;
}

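/*
 * Final check at the end of submission: if the notifier fired between
 * submit_init() and now, the pinned pages may already be stale, so report
 * -EAGAIN and let the caller restart the submission.
 */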
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
{
	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    obj->userptr.notifier_seq)) {
		/* We collided with the mmu notifier, need to retry */
		return -EAGAIN;
	}

	return 0;
}

int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_userptr_submit_init(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (!err) {
		/*
		 * Since we only check validity and do not actually use the
		 * pages, it doesn't matter if we collide with the mmu
		 * notifier, and -EAGAIN handling is not required.
		 */
		err = i915_gem_object_pin_pages(obj);
		if (!err)
			i915_gem_object_unpin_pages(obj);

		i915_gem_object_unlock(obj);
	}

	return err;
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	GEM_WARN_ON(obj->userptr.page_ref);

	mmu_interval_notifier_remove(&obj->userptr.notifier);
	obj->userptr.notifier.mm = NULL;
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
			const struct drm_i915_gem_pwrite *args)
{
	drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
		       const struct drm_i915_gem_pread *args)
{
	drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n");

	return -EINVAL;
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.name = "i915_gem_object_userptr",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_IS_PROXY,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.pwrite = i915_gem_userptr_pwrite,
	.pread = i915_gem_userptr_pread,
	.release = i915_gem_userptr_release,
};

#endif

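/*
 * Walk the VMAs covering [addr, addr + len) and fail with -EFAULT if the
 * range contains holes or touches PFN/mixed mappings that have no struct
 * pages behind them.
 */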
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
	const unsigned long end = addr + len;
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	mmap_read_lock(mm);
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		/* Check for holes; note that we also update addr below. */
		if (vma->vm_start > addr)
			break;

		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			break;

		if (vma->vm_end >= end) {
			ret = 0;
			break;
		}

		addr = vma->vm_end;
	}
	mmap_read_unlock(mm);

	return ret;
}

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is, we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite() and pread(). In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
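 *
 * A minimal userspace sketch (illustrative only; error handling and the
 * usual libdrm plumbing are omitted, and use_gem_handle() is a hypothetical
 * helper). ptr must be page aligned and size a multiple of the page size:
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (uintptr_t)ptr,
 *		.user_size = size,
 *		.flags = 0,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use_gem_handle(arg.handle);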
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key __maybe_unused lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object __maybe_unused *obj;
	int __maybe_unused ret;
	u32 __maybe_unused handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/*
		 * We cannot support coherent userptr objects on hw without
		 * LLC and with broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED |
			    I915_USERPTR_PROBE))
		return -EINVAL;

	if (i915_gem_object_size_2big(args->user_size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

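	/* The unsynchronized (no-notifier) mode is no longer supported. */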
	if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
		return -ENODEV;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		if (!dev_priv->gt.vm->has_read_only)
			return -ENODEV;
	}

	if (args->flags & I915_USERPTR_PROBE) {
		/*
		 * Check that the range pointed to represents real struct
		 * pages and not iomappings (at this moment in time!)
		 */
		ret = probe_range(current->mm, args->user_ptr, args->user_size);
		if (ret)
			return ret;
	}

#ifdef CONFIG_MMU_NOTIFIER
	obj = i915_gem_object_alloc();
	if (!obj)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, 0);
	obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.notifier_seq = ULONG_MAX;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/*
	 * And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mmu_notifier(obj);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
#else
	return -ENODEV;
#endif
}

int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_MMU_NOTIFIER
	rwlock_init(&dev_priv->mm.notifier_lock);
#endif

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
}