xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision 1f70e079)
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"


/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p = drm_gem_get_pages(obj, 0);
		int npages = obj->size >> PAGE_SHIFT;

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, undo the device mapping that
		 * was set up in get_pages():
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		drm_gem_put_pages(obj, msm_obj->pages, true, false);
		msm_obj->pages = NULL;
	}
}

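/* Set up the caching attributes of a userspace mapping to match the
 * buffer's cache mode (write-combined, uncached, or cached):
 */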
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

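/* Standard GEM mmap entry point: drm_gem_mmap() resolves the object from
 * its fake offset, then the mapping attributes are fixed up above:
 */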
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

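/* Page-fault handler for mmap'd objects: pin the backing pages (allocating
 * them on first fault) and insert the faulting page into the vma:
 */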
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(msm_obj->pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
		set_need_resched();
		/* fall-through */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/* helpers for dealing w/ iommu: */
static int map_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, da, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	/* unwind the partial mapping: unmap everything mapped so far */
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;
		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

static void unmap_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			break;

		VERB("unmap[%d]: %08x(%zx)", i, da, bytes);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		da += bytes;
	}
}

/* Should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means that when support for unpinning is eventually added,
 * the refcnt counter will need to become an atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		uint32_t offset = (uint32_t)mmap_offset(obj);
		struct page **pages;
		pages = get_pages(obj);
		if (IS_ERR(pages))
			return PTR_ERR(pages);
		/* XXX ideally we would not map buffers writable when not needed... */
		ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
				obj->size, IOMMU_READ | IOMMU_WRITE);
		msm_obj->domain[id].iova = offset;
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	int ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

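/* A minimal usage sketch (illustrative, not part of the original file),
 * assuming a caller such as the submit path that already holds
 * struct_mutex and a gpu whose 'id' selects the iommu in priv->iommus:
 *
 *	uint32_t iova;
 *	int ret = msm_gem_get_iova_locked(obj, gpu->id, &iova);
 *	if (ret)
 *		return ret;
 *	... hand 'iova' to the GPU cmdstream ...
 */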
void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

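/* Dumb-buffer support: simple CPU-rendered scanout buffers for KMS: */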
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		uint32_t handle)
{
	/* No special work needed, drop the reference and see what falls out */
	return drm_gem_handle_delete(file, handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

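/* Lazily create (and cache) a kernel virtual mapping of the entire
 * buffer; the mapping persists until the object is freed:
 */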
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

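/* Queue 'work' to run once the object becomes inactive (ie. the GPU is
 * done with it).  If the object is already inactive the work is queued
 * immediately; returns -EINVAL if the work is already pending:
 */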
int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
		struct work_struct *work)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&work->entry)) {
		ret = -EINVAL;
	} else if (is_active(msm_obj)) {
		list_add_tail(&work->entry, &msm_obj->inactive_work);
	} else {
		queue_work(priv->wq, work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

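/* Called on submit: move the object to the gpu's active list and record
 * the fence that must complete before it is idle for read and/or write:
 */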
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool write, uint32_t fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (write)
		msm_obj->write_fence = fence;
	else
		msm_obj->read_fence = fence;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	msm_obj->read_fence = 0;
	msm_obj->write_fence = 0;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	/* flush any work that was waiting for the object to go idle: */
	while (!list_empty(&msm_obj->inactive_work)) {
		struct work_struct *work;

		work = list_first_entry(&msm_obj->inactive_work,
				struct work_struct, entry);

		list_del_init(&work->entry);
		queue_work(priv->wq, work);
	}
}

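/* Wait for any GPU access to finish before allowing CPU access: a CPU
 * read must wait for pending GPU writes, and a CPU write must also wait
 * for pending GPU reads:
 */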
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
		struct timespec *timeout)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC)) {
		uint32_t fence = 0;
		if (op & MSM_PREP_READ)
			fence = msm_obj->write_fence;
		if (op & MSM_PREP_WRITE)
			fence = max(fence, msm_obj->read_fence);
		ret = msm_wait_fence_interruptable(dev, fence, timeout);
	}

	/* TODO cache maintenance */

	return ret;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			msm_obj->read_fence, msm_obj->write_fence,
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (msm_obj->domain[id].iova) {
			struct msm_drm_private *priv = obj->dev->dev_private;
			uint32_t offset = (uint32_t)mmap_offset(obj);
			unmap_range(priv->iommus[id], offset,
					msm_obj->sgt, obj->size);
		}
	}

	drm_gem_free_mmap_offset(obj);

	if (msm_obj->vaddr)
		vunmap(msm_obj->vaddr);

	put_pages(obj);

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

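/* Allocate and initialize a new GEM object.  The caller must hold
 * dev->struct_mutex:
 */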
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		ret = -EINVAL;
		goto fail;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj) {
		ret = -ENOMEM;
		goto fail;
	}

	obj = &msm_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	msm_obj->flags = flags;

	msm_obj->resv = &msm_obj->_resv;
	reservation_object_init(msm_obj->resv);

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->inactive_work);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	return obj;

fail:
	/* struct_mutex is held here, so use the locked unreference (the
	 * _unlocked variant would re-take struct_mutex on the final unref
	 * and deadlock):
	 */
	if (obj)
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}
609