/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/* remove these once drm core helpers are merged */
struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);
int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

/*
 * GEM buffer object implementation.
 */

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If the buffer is allocated physically contiguous, the OMAP_BO_DMA
	 * flag is set and the paddr is valid.  Also if the buffer is remapped
	 * in TILER and paddr_cnt > 0, then paddr is valid.  But if you are
	 * using the physical address and OMAP_BO_DMA is not set, then you
	 * should be going thru omap_gem_{get,put}_paddr() to ensure the
	 * mapping is not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA
	 * capable buffer is requested, but doesn't mean that it is.  Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when a buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects.  This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space.  Because of this we have to have an omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated.  A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static uint64_t mmap_offset(struct drm_gem_object *obj);

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
} *usergart;
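
/*
 * A worked example of the geometry above (hypothetical numbers; the real
 * values come from tiler_align() and tiler_stride() in omap_gem_init()):
 * assuming 4KB pages and a container whose slots are 64 rows tall,
 * height = 64 and height_shift = 6, so one usergart region covers 64
 * pages and "pgoff >> height_shift" converts a page offset into a slot
 * row.  slot_shift similarly lets fault_2d() turn a buffer width in
 * pixels into a width in slots with a single shift.
 */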

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int n = usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride is greater than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't keep them pinned there all the time, to
 * reduce pressure on TILER/DMM space, even when we know at allocation
 * time that the buffer will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

static DEFINE_SPINLOCK(sync_lock);

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n",
				PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, tear down the DMA mappings that were
	 * created in omap_gem_attach_pages():
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}
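
/*
 * For example (hypothetical dimensions): a tiled buffer whose rows are
 * 1400 bytes wide has each row's virtual stride rounded up to 4096
 * bytes by tiler_vsize(), so the size used for the mmap offset is
 * 4096 * height rather than 1400 * height; userspace must walk the
 * mapping using the rounded-up stride, not the pixel width.
 */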

/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the
	 * slot height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &usergart[fmt].entry[usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than-4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}
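
/*
 * A worked example of the fault_2d() math above (hypothetical numbers):
 * with n = 64 (slot height), n_shift = 6 and a buffer narrower than a
 * page (m = 1), a fault at pgoff = 130 rounds down to base_pgoff = 128,
 * i.e. the start of slot-row 2 (128 >> 6), and the 64 pages starting
 * there are pinned into the reserved block and inserted into the vma.
 * For a buffer two pages wide (m = 2), a fault at pgoff = 131 rounds
 * down to a multiple of m << n_shift = 128, and off = 131 % 2 = 1 then
 * selects the second 4KB-wide column of that slot-row.
 */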

/**
 * omap_gem_fault		-	pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() of all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/**
 * omap_gem_dumb_create	-	create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
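
/*
 * For example: a 1920x1080 request at bpp = 32 yields a pitch of at
 * least 1920 * 4 = 7680 bytes (align_pitch() may round this up), and
 * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes, which happens to
 * already be a multiple of the 4KB page size.
 */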

/**
 * omap_gem_dumb_map	-	buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned fake mmap offset for the buffer
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
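
/*
 * A minimal userspace sketch of the dumb-buffer path (error handling
 * omitted; "fd" is an open DRM device file descriptor):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, map.offset);
 *
 * The fake offset returned in map.offset comes from
 * omap_gem_dumb_map_offset() above, and the mmap() itself lands in
 * omap_gem_mmap().
 */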

/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %u\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}
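
/*
 * Together, omap_gem_cpu_sync() and omap_gem_dma_sync() implement the
 * fault-based coherency described at is_cached_coherent(): before the
 * CPU touches a page, its DMA mapping is torn down (dma_unmap_page()
 * performs the cache maintenance for CPU ownership), and before DMA,
 * omap_gem_dma_sync() remaps any pages the CPU faulted in and then
 * zaps the userspace mapping so the next CPU access faults and flows
 * through omap_gem_cpu_sync() again.
 */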

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
int omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
				goto fail;
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}
fail:
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
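
/*
 * A minimal sketch of the intended get/put pairing (hypothetical
 * caller; the real users are e.g. the plane/framebuffer code):
 *
 *	dma_addr_t paddr;
 *	int ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (ret)
 *		return ret;
 *	...point DSS or another DMA engine at paddr...
 *	omap_gem_put_paddr(obj);
 *
 * As the struct comment explains, skipping the get/put is only safe
 * for buffers with the driver-internal OMAP_BO_DMA flag set.
 */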

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, "   ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved, which can call a user
 * callback (ex. to kick 3d and/or 2d), wake up a blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)

static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}
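
/*
 * A minimal sketch of how the sync API is intended to be used
 * (hypothetical callers): a hw/DMA user brackets its access with
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);
 *	...kick hw, then from its completion callback...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
 *
 * while a task that wants CPU access blocks until conflicting
 * operations have completed:
 *
 *	ret = omap_gem_op_sync(obj, OMAP_GEM_READ | OMAP_GEM_WRITE);
 */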

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked..  fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				  GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	drm_gem_free_mmap_offset(obj);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem page backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		goto fail;

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (omap_obj->vaddr)
			flags |= OMAP_BO_DMA;
	}

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	return obj;

fail:
	if (obj)
		omap_gem_free_object(obj);

	return NULL;
}

/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(usergart);
}
1488