/* xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision edd4fc63) */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p = drm_gem_get_pages(obj, 0);
		int npages = obj->size >> PAGE_SHIFT;

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, undo the DMA mapping set up in
		 * get_pages(), since display controller, GPU, etc. are not
		 * coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		drm_gem_put_pages(obj, msm_obj->pages, true, false);
		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

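/*
 * Usage sketch (illustrative only, not a caller in this file): pin the
 * backing pages around access.  Since msm_gem_put_pages() is currently a
 * no-op, the "unpin" is symbolic until pin-count tracking is added:
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... access pages ...
 *	msm_gem_put_pages(obj);
 */
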
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

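/*
 * To summarize the three cases above: MSM_BO_WC and MSM_BO_UNCACHED
 * mappings keep the GEM object's own file but weaken the page protection,
 * while cached objects are redirected to the shmem file itself so that
 * they get their own address_space and unmap_mapping_range() can shoot
 * down stale CPU mappings (notably for mmap'd dmabufs).
 */
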
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't race a parallel update on the fault, nor have
	 * something moved or removed from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(msm_obj->pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

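/*
 * Userspace reaches this offset via the standard DRM fake-offset mmap
 * protocol (a sketch of the generic flow, not code from this driver):
 *
 *	struct drm_mode_map_dumb req = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &req);
 *	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, req.offset);
 */
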
/* helpers for dealing w/ iommu: */
static int map_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;
		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

static void unmap_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			break;

		VERB("unmap[%d]: %08x(%x)", i, iova, bytes);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		da += bytes;
	}
}

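/*
 * Note on the arithmetic in the helpers above: sg_phys(sg) - sg->offset
 * backs up to the page-aligned physical address of each segment, and
 * sg->length + sg->offset grows the size to match, so mappings start on
 * page boundaries; the BUG_ON() in unmap_range() further expects each
 * segment to span whole pages.
 */
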
/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning,
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		uint32_t offset = (uint32_t)mmap_offset(obj);
		struct page **pages;
		pages = get_pages(obj);
		if (IS_ERR(pages))
			return PTR_ERR(pages);
		// XXX ideally we would not map buffers writable when not needed...
		ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
				obj->size, IOMMU_READ | IOMMU_WRITE);
		msm_obj->domain[id].iova = offset;
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

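/*
 * Illustrative caller (a sketch modeled on the GPU submit path; the
 * surrounding names are assumptions, not code from this file): resolve a
 * per-IOMMU-domain iova before handing the buffer to the GPU:
 *
 *	uint32_t iova;
 *	int ret = msm_gem_get_iova(obj, gpu->id, &iova);
 *	if (ret)
 *		return ret;
 */
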
void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

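/*
 * Worked example: a 1024x768 dumb buffer at 32 bpp gets
 * pitch = align_pitch(1024, 32) and size = PAGE_ALIGN(pitch * 768),
 * allocated write-combined and flagged as a potential scanout buffer.
 */
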
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

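/*
 * Kernel-side CPU access sketch (illustrative only).  The vmap()'d address
 * lives until the object is freed, so there is no matching "vaddr put";
 * note also the NULL return if vmap() itself fails:
 *
 *	void *vaddr = msm_gem_vaddr(obj);
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return vaddr ? PTR_ERR(vaddr) : -ENOMEM;
 *	memcpy(vaddr, data, len);
 */
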
/* setup callback for when bo is no longer busy..
 * TODO probably want to differentiate read vs write..
 */
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
		struct msm_fence_cb *cb)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&cb->work.entry)) {
		ret = -EINVAL;
	} else if (is_active(msm_obj)) {
		cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
		list_add_tail(&cb->work.entry, &priv->fence_cbs);
	} else {
		queue_work(priv->wq, &cb->work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

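/*
 * Example (a sketch assuming the msm_fence_cb/INIT_FENCE_CB helpers from
 * msm_drv.h): run a function on priv->wq once the bo's most recent fence
 * has passed:
 *
 *	static void thing_done(struct msm_fence_cb *cb) { ... }
 *
 *	INIT_FENCE_CB(&my_cb, thing_done);
 *	ret = msm_gem_queue_inactive_cb(obj, &my_cb);
 */
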
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool write, uint32_t fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (write)
		msm_obj->write_fence = fence;
	else
		msm_obj->read_fence = fence;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	msm_obj->read_fence = 0;
	msm_obj->write_fence = 0;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
		struct timespec *timeout)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (is_active(msm_obj)) {
		uint32_t fence = 0;

		if (op & MSM_PREP_READ)
			fence = msm_obj->write_fence;
		if (op & MSM_PREP_WRITE)
			fence = max(fence, msm_obj->read_fence);
		if (op & MSM_PREP_NOSYNC)
			timeout = NULL;

		ret = msm_wait_fence_interruptable(dev, fence, timeout);
	}

	/* TODO cache maintenance */

	return ret;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

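/*
 * Usage sketch (illustrative, mirroring the MSM_GEM_CPU_PREP/CPU_FINI
 * ioctl pair): bracket CPU access so it orders against in-flight GPU work.
 * Note the fence selection in msm_gem_cpu_prep(): a CPU read waits for
 * pending GPU writes, while a CPU write additionally waits for GPU reads:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	...CPU access...
 *	msm_gem_cpu_fini(obj);
 */
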
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			msm_obj->read_fence, msm_obj->write_fence,
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (msm_obj->domain[id].iova) {
			struct msm_drm_private *priv = obj->dev->dev_private;
			uint32_t offset = (uint32_t)mmap_offset(obj);
			unmap_range(priv->iommus[id], offset,
					msm_obj->sgt, obj->size);
		}
	}

	drm_gem_free_mmap_offset(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

	} else {
		if (msm_obj->vaddr)
			vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;

	msm_obj->resv = &msm_obj->_resv;
	reservation_object_init(msm_obj->resv);

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	/* initialize to NULL so the fail path doesn't test an
	 * uninitialized pointer when msm_gem_new_impl() fails:
	 */
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}

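/*
 * Kernel-internal allocation sketch (illustrative only): callers must hold
 * struct_mutex, as the WARN_ON() above enforces:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	obj = msm_gem_new(dev, size, MSM_BO_WC);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */
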
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		uint32_t size, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	/* initialize to NULL so the fail path doesn't test an
	 * uninitialized pointer when msm_gem_new_impl() fails:
	 */
	struct drm_gem_object *obj = NULL;
	int ret, npages;

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}