xref: /openbmc/linux/drivers/gpu/drm/ttm/ttm_bo_vm.c (revision efe4a1ac)
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

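/*
 * Number of pages the fault handler speculatively inserts per fault,
 * starting at the faulting page (see the prefault loop in
 * ttm_bo_vm_fault()).
 */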
#define TTM_BO_VM_NUM_PREFAULT 16

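/*
 * ttm_bo_vm_fault_idle - quiesce an in-flight buffer move before faulting.
 *
 * Returns 0 once the buffer is idle. With FAULT_FLAG_ALLOW_RETRY set,
 * returns VM_FAULT_RETRY instead of blocking with the mmap_sem held;
 * unless FAULT_FLAG_RETRY_NOWAIT was also set, the mmap_sem is dropped
 * around the wait and the BO is unreserved before returning. A failed
 * blocking wait yields VM_FAULT_NOPAGE or VM_FAULT_SIGBUS.
 */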
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_fault *vmf)
{
	int ret = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

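		/*
		 * Take a temporary reference so the BO cannot be
		 * freed while we sleep on its fence without the
		 * mmap_sem held.
		 */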
		ttm_bo_reference(bo);
		up_read(&vmf->vma->vm_mm->mmap_sem);
		(void) dma_fence_wait(bo->moving, true);
		ttm_bo_unreserve(bo);
		ttm_bo_unref(&bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	ret = dma_fence_wait(bo->moving, true);
	if (unlikely(ret != 0)) {
		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}

static int ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = vmf->address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
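				/*
				 * Pin the BO with a temporary reference,
				 * drop the mmap_sem, and wait for it to
				 * become unreserved before retrying.
				 */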
				ttm_bo_reference(bo);
				up_read(&vmf->vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
				ttm_bo_unref(&bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;

		if (retval == VM_FAULT_RETRY &&
		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/* The BO has already been unreserved. */
			return retval;
		}

		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

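	/*
	 * Translate the fault address to a page offset inside the BO:
	 * vma->vm_pgoff is relative to the device's vma offset manager,
	 * so the node start has to be subtracted out.
	 */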
	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma fields cannot change
	 * under us, since modifying them requires mmap_sem held in
	 * write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = bdev->driver->io_mem_pfn(bo, page_offset);
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->mapping = vma->vm_file->f_mapping;
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

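		/*
		 * Insert the PTE through the local vma copy so that
		 * the caching attributes prepared in cvma.vm_page_prot
		 * are applied.
		 */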
		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vm_insert_mixed(&cvma, address,
					__pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vm_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE, we prefaulted into an
		 * already populated PTE, or prefaulting failed.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}

static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};

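/*
 * ttm_bo_vm_lookup - resolve an mmap offset range to a buffer object.
 *
 * Looks the range up in the device's vma offset manager and takes a
 * reference on the BO, using kref_get_unless_zero() to guard against
 * racing with the final unref.
 */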
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
					unsigned long page_offset)
{
	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
		+ page_offset;
}
EXPORT_SYMBOL(ttm_bo_default_io_mem_pfn);
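
/*
 * A minimal usage sketch (the foo_* names are illustrative, not part of
 * this file): a driver whose io_mem apertures map linearly can plug this
 * helper straight into its ttm_bo_driver:
 *
 *	static struct ttm_bo_driver foo_bo_driver = {
 *		...
 *		.io_mem_pfn = ttm_bo_default_io_mem_pfn,
 *	};
 */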

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
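
/*
 * A minimal usage sketch (the foo_* names are illustrative, not part of
 * this file): drivers typically forward their file_operations::mmap
 * straight here, passing their ttm_bo_device:
 *
 *	static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct foo_device *fdev = foo_device_from_file(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &fdev->bdev);
 *	}
 */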

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

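/*
 * A minimal usage sketch (the foo_* names are illustrative, not part of
 * this file): a driver's fb_ops::fb_mmap can use the helper above to map
 * its framebuffer BO at offset zero:
 *
 *	static int foo_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 *	{
 *		struct foo_fbdev *fbdev = info->par;
 *
 *		return ttm_fbdev_mmap(vma, fbdev->bo);
 *	}
 */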