// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

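/*
 * Why two lock classes (an inference from the locking in this file): for
 * shmem objects the fault handler takes the object lock under mmap_lock,
 * while for userptr objects get_pages() takes mmap_lock under the object
 * lock. Separate lockdep classes keep this (valid) opposite nesting from
 * being flagged as a deadlock.
 */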
static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

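/*
 * Tear down the DMA mapping and scatter/gather table, if present, and
 * return the backing pages to shmem, marking them dirty since the GPU
 * may have written to them.
 */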
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

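/*
 * Pin the backing pages and (lazily) build and DMA-map the scatter/gather
 * table for the object. Must be called with the object lock held; a
 * typical caller looks like:
 *
 *	mutex_lock(&etnaviv_obj->lock);
 *	pages = etnaviv_gem_get_pages(etnaviv_obj);
 *	mutex_unlock(&etnaviv_obj->lock);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 */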
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, etnaviv_obj->base.filp);

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
}

static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vmf_insert_pfn() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     pfn, pfn << PAGE_SHIFT);

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

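/*
 * Hand out the fake offset that userspace passes to mmap() on the DRM fd
 * to map this object (etnaviv exposes it through the GEM_INFO ioctl),
 * roughly:
 *
 *	ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, offset);
 */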
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}

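/*
 * Look up (or create) the GPU VA mapping of the object in the given MMU
 * context and take a use count on it, plus a reference on the GEM object
 * itself. Every successful call must be balanced by
 * etnaviv_gem_mapping_unreference(), which drops both again.
 */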
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context) {
				if (va && mapping->iova != va) {
					etnaviv_iommu_reap_mapping(mapping);
					mapping = NULL;
				} else {
					mapping->use += 1;
				}
			} else {
				mapping = NULL;
			}
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

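/*
 * Return a kernel virtual mapping of the object, created on first use and
 * kept until the object is destroyed. The unlocked fast-path check is safe
 * because vaddr only ever transitions from NULL to a valid pointer during
 * the object's lifetime; it is only torn down in the ops->release() path.
 */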
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;
	pgprot_t prot;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	switch (obj->flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_CACHED:
		prot = PAGE_KERNEL;
		break;
	case ETNA_BO_UNCACHED:
		prot = pgprot_noncached(PAGE_KERNEL);
		break;
	case ETNA_BO_WC:
	default:
		prot = pgprot_writecombine(PAGE_KERNEL);
	}

	return vmap(pages, obj->base.size >> PAGE_SHIFT, VM_MAP, prot);
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	op &= ETNA_PREP_READ | ETNA_PREP_WRITE;

	if (op == ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op == ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

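/*
 * Backing for the CPU_PREP/CPU_FINI ioctls: userspace brackets direct CPU
 * access to a buffer with a prep/fini pair so that cached objects can be
 * synced against GPU activity, roughly:
 *
 *	etnaviv_gem_cpu_prep(obj, ETNA_PREP_WRITE, &timeout);
 *	...CPU reads/writes through the mmap...
 *	etnaviv_gem_cpu_fini(obj);
 *
 * prep waits for pending GPU work (unless ETNA_PREP_NOSYNC is set) and
 * syncs the sg table for CPU access; fini syncs it back for the device.
 */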
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled(obj->resv,
					    dma_resv_usage_rw(write)))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
					    true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	unsigned long off = drm_vma_node_start(&obj->vma_node);
	int r;

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	r = dma_resv_lock(robj, NULL);
	if (r)
		return;

	dma_resv_describe(robj, m);
	dma_resv_unlock(robj);
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

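/*
 * Final teardown, called once the last reference to the object is dropped:
 * unlink it from the device's GEM list, tear down any remaining (unused)
 * MMU mappings, then let the backend release its pages before freeing the
 * object itself.
 */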
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context)
			etnaviv_iommu_unmap_gem(context, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
	.free = etnaviv_gem_free_object,
	.pin = etnaviv_gem_prime_pin,
	.unpin = etnaviv_gem_prime_unpin,
	.get_sg_table = etnaviv_gem_prime_get_sg_table,
	.vmap = etnaviv_gem_prime_vmap,
	.mmap = etnaviv_gem_mmap,
	.vm_ops = &vm_ops,
};

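/*
 * Allocate and initialise the driver-private part of a GEM object. Note
 * that this does not initialise the base drm_gem_object; the caller still
 * has to call drm_gem_object_init() (shmem-backed) or
 * drm_gem_private_object_init() (userptr/imported) on the result.
 */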
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;
	(*obj)->funcs = &etnaviv_gem_object_funcs;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

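/*
 * Pin the user pages backing a userptr object. The pages are pinned with
 * FOLL_LONGTERM since they stay pinned for the lifetime of the object, and
 * only the mm that created the object may populate it. Note that
 * pin_user_pages_fast() may pin fewer pages than requested, hence the loop
 * until all npages are pinned.
 */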
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	unsigned int gup_flags = FOLL_LONGTERM;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!userptr->ro)
		gup_flags |= FOLL_WRITE;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages, gup_flags, pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);
	return ret;
}