/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

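/*
 * Get the backing pages of a BO and lazily build its scatter/gather table.
 * Must be called with etnaviv_obj->lock held.  A minimal caller sketch
 * (matching the pattern used by etnaviv_gem_cpu_prep() below):
 *
 *	mutex_lock(&etnaviv_obj->lock);
 *	pages = etnaviv_gem_get_pages(etnaviv_obj);
 *	mutex_unlock(&etnaviv_obj->lock);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 */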
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

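/*
 * mmap() entry point for etnaviv BOs: drm_gem_mmap() resolves the fake
 * offset to the GEM object and performs the generic VMA setup, then the
 * per-object ->mmap() op (etnaviv_gem_mmap_obj() for shmem BOs) adjusts
 * the page protection according to the BO's caching flags.
 */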
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

int etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, vmf->address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_get(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put_unlocked(&etnaviv_obj->base);
}

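/*
 * Look up (or create) the mapping of a BO into a GPU's MMU.  On success
 * the mapping's use count is raised and an extra reference is taken on
 * the GEM object, so each successful call must be balanced by
 * etnaviv_gem_mapping_unreference().  A minimal usage sketch
 * (illustrative only):
 *
 *	mapping = etnaviv_gem_mapping_get(obj, gpu);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	... use the mapping for GPU access ...
 *	etnaviv_gem_mapping_unreference(mapping);
 */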
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

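/*
 * Return a kernel virtual mapping of the BO.  The vmap is created on
 * first use and cached in etnaviv_obj->vaddr; it is only torn down again
 * from the per-object ->release() op (see etnaviv_gem_shmem_release()),
 * so callers never unmap it themselves.
 */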
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

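/*
 * CPU access to a cached (ETNA_BO_CACHED) BO must be bracketed by
 * cpu_prep()/cpu_fini() so the streaming DMA API can keep the CPU caches
 * coherent with the GPU.  A minimal sketch of the expected call pattern
 * (illustrative only):
 *
 *	ret = etnaviv_gem_cpu_prep(obj, ETNA_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU accesses the buffer ...
 *	etnaviv_gem_cpu_fini(obj);
 */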
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

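/*
 * Final destruction of a BO once its last reference has been dropped:
 * remove it from the global object list, tear down any remaining MMU
 * mappings, release the backing storage through the per-object
 * ->release() op and free the wrapper structure.
 */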
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	list_del(&etnaviv_obj->gem_node);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}

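/*
 * Common allocation of the etnaviv wrapper object.  This only validates
 * the caching flags and sets up the embedded bookkeeping (reservation
 * object, lock, vram_list); attaching backing storage is left to the
 * callers, which either use drm_gem_object_init() for shmem-backed BOs
 * or drm_gem_private_object_init() for userptr and other un-backed
 * (private) BOs.
 */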
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA.  See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_put_unlocked(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return obj;

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_put_unlocked(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};

static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;
	unsigned int flags = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	if (!etnaviv_obj->userptr.ro)
		flags |= FOLL_WRITE;

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
					    flags, pvec + pinned, NULL, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		release_pages(pvec, pinned, 0);
		kvfree(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}

static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_put_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}

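/*
 * Pin the pages of a userptr BO.  If the BO belongs to the current
 * process, the lockless __get_user_pages_fast() path is tried first; if
 * that cannot pin every page, or the BO was created from another
 * process, the pinning is handed off to a worker which can take mmap_sem
 * and use get_user_pages_remote(), and -EAGAIN is returned so the caller
 * retries once the worker has completed.
 */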
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			kvfree(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	release_pages(pvec, pinned, 0);
	kvfree(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	get_task_struct(current);
	drm_gem_object_get(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages, 0);
		kvfree(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

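/*
 * Create a BO backed by anonymous user memory.  The pages are pinned on
 * demand through etnaviv_gem_userptr_ops.get_pages(); the object is
 * always treated as ETNA_BO_CACHED and cannot be mmap'ed via its GEM
 * offset (etnaviv_gem_userptr_mmap_obj() returns -EINVAL).
 */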
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret)
		goto unreference;

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
unreference:
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&etnaviv_obj->base);
	return ret;
}