// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

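/*
 * Separate lockdep classes for the object locks of shmem and userptr
 * buffers: userptr page population can take mmap_sem, so the two object
 * types are locked in different contexts and lockdep needs to tell the
 * locks apart.
 */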
static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

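/*
 * Called with etnaviv_obj->lock held. Lazily populates the backing pages
 * on first use and builds the scatter/gather table so the buffer can be
 * mapped for device DMA.
 */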
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

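/*
 * Set up a userspace mapping according to the buffer's caching flags:
 * write-combined and uncached objects get the matching page protection,
 * while cached objects are redirected to the shmem file backing the
 * object.
 */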
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

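/*
 * Fault handler for userspace mappings: make sure the backing pages are
 * attached, then insert the single page covering the faulting address.
 */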
vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put_unlocked(&etnaviv_obj->base);
}

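/*
 * Look up the buffer's mapping in the given MMU context, or create one if
 * none exists yet. On success the mapping use count is incremented and a
 * reference is held on the GEM object.
 */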
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	etnaviv_iommu_context_get(mmu_context);
	mapping->context = mmu_context;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0) {
		etnaviv_iommu_context_put(mmu_context);
		kfree(mapping);
	} else {
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
	}

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

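/* Return a kernel virtual mapping of the buffer, creating one on first use. */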
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

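/*
 * Translate an ETNA_PREP_* CPU access op into the DMA direction used for
 * cache maintenance: a CPU read needs to sync from the device, a CPU
 * write needs to sync to it.
 */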
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %llu\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

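/* Drop the kernel vmap (if any) and release the shmem backing pages. */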
static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context) {
			etnaviv_iommu_unmap_gem(context, mapping);
			etnaviv_iommu_context_put(context);
		}

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

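/*
 * Common allocation path: validate the requested caching flags and set up
 * the driver-private part of a new GEM object. The caller is responsible
 * for the drm_gem_object_init()/drm_gem_private_object_init() step.
 */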
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj)
		etnaviv_obj->base.resv = robj;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

/* convenience method to construct a GEM buffer object and a userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
			     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put_unlocked(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

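/*
 * Pin the user pages backing a userptr buffer with get_user_pages_fast().
 * Only the mm that created the buffer is allowed to populate it.
 */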
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_sem);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = get_user_pages_fast(ptr, num_pages,
					  !userptr->ro ? FOLL_WRITE : 0, pages);
		if (ret < 0) {
			release_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&etnaviv_obj->base);
	return ret;
}