/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}
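
/*
 * A minimal caller sketch (illustrative; it mirrors the callers in this
 * file): etnaviv_gem_get_pages() and etnaviv_gem_put_pages() must be
 * bracketed by the object lock, e.g.
 *
 *	mutex_lock(&etnaviv_obj->lock);
 *	pages = etnaviv_gem_get_pages(etnaviv_obj);
 *	mutex_unlock(&etnaviv_obj->lock);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 * This is exactly the pattern used by etnaviv_gem_fault() and
 * etnaviv_gem_cpu_prep() below.
 */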

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, vmf->address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}
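
/*
 * Usage sketch (illustrative, not part of this file): userspace fetches
 * this offset through the ETNAVIV_GEM_INFO ioctl and hands it to mmap()
 * on the DRM device fd, along the lines of:
 *
 *	struct drm_etnaviv_gem_info req = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_INFO, &req);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, req.offset);
 *
 * Error handling is omitted for brevity.
 */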

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_reference(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
}

struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_reference(obj);
	return mapping;
}
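
/*
 * A hedged usage sketch for the mapping API above (the GPU submit path
 * is the real caller): obtain a mapping, emit commands against its MMU
 * address, then drop the use count once the GPU is done with it:
 *
 *	mapping = etnaviv_gem_mapping_get(obj, gpu);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	... emit command stream relocations using mapping->iova ...
 *	etnaviv_gem_mapping_unreference(mapping);
 */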

void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	unsigned long remain =
		op & ETNA_PREP_NOSYNC ? 0 : etnaviv_timeout_to_jiffies(timeout);
	long lret;

	lret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
						   write, true, remain);
	if (lret < 0)
		return lret;
	else if (lret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		if (!etnaviv_obj->sgt) {
			void *ret;

			mutex_lock(&etnaviv_obj->lock);
			ret = etnaviv_gem_get_pages(etnaviv_obj);
			mutex_unlock(&etnaviv_obj->lock);
			if (IS_ERR(ret))
				return PTR_ERR(ret);
		}

		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}
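
/*
 * For ETNA_BO_CACHED objects, CPU access must be bracketed by the
 * prep/fini pair above so that cache maintenance happens in both
 * directions; a minimal sketch (illustrative, reached through the
 * CPU_PREP/CPU_FINI ioctls in practice):
 *
 *	etnaviv_gem_cpu_prep(obj, ETNA_PREP_WRITE, &timeout);
 *	... CPU writes into the buffer ...
 *	etnaviv_gem_cpu_fini(obj);
 *
 * Skipping the fini leaves dirty cache lines behind, with the
 * corruption potential described in etnaviv_gem_scatterlist_unmap().
 */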

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	list_del(&etnaviv_obj->gem_node);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA.  See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
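
/*
 * This is what the DRM_ETNAVIV_GEM_NEW ioctl path boils down to; a
 * userspace sketch (illustrative, error handling omitted):
 *
 *	struct drm_etnaviv_gem_new req = {
 *		.size = size,
 *		.flags = ETNA_BO_WC,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_NEW, &req);
 *	(req.handle now names the buffer object)
 */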

struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return obj;

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};

static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;
	unsigned int flags = 0;

	pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	if (!etnaviv_obj->userptr.ro)
		flags |= FOLL_WRITE;

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
					    flags, pvec + pinned, NULL, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		release_pages(pvec, pinned, 0);
		drm_free_large(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}

static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}

static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			drm_free_large(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	get_task_struct(current);
	drm_gem_object_reference(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}
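
/*
 * Note that the -EAGAIN above is part of the contract rather than a
 * hard failure: pinning was handed off to the worker, and the caller is
 * expected to retry once userptr.work has completed.  A hedged caller
 * sketch (illustrative only):
 *
 *	mutex_lock(&etnaviv_obj->lock);
 *	pages = etnaviv_gem_get_pages(etnaviv_obj);
 *	mutex_unlock(&etnaviv_obj->lock);
 *	if (IS_ERR(pages) && PTR_ERR(pages) == -EAGAIN)
 *		... back off and retry after the worker has run ...
 */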

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages, 0);
		drm_free_large(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret)
		goto unreference;

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
unreference:
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
	return ret;
}