xref: /openbmc/linux/drivers/gpu/drm/omapdrm/omap_gem.c (revision b830f94f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
4  * Author: Rob Clark <rob.clark@linaro.org>
5  */
6 
7 #include <linux/seq_file.h>
8 #include <linux/shmem_fs.h>
9 #include <linux/spinlock.h>
10 #include <linux/pfn_t.h>
11 
12 #include <drm/drm_vma_manager.h>
13 
14 #include "omap_drv.h"
15 #include "omap_dmm_tiler.h"
16 
17 /*
18  * GEM buffer object implementation.
19  */
20 
21 /* note: we use upper 8 bits of flags for driver-internal flags: */
22 #define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
23 #define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
24 #define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */
25 
26 struct omap_gem_object {
27 	struct drm_gem_object base;
28 
29 	struct list_head mm_list;
30 
31 	u32 flags;
32 
33 	/** width/height for tiled formats (rounded up to slot boundaries) */
34 	u16 width, height;
35 
36 	/** roll applied when mapping to DMM */
37 	u32 roll;
38 
39 	/** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */
40 	struct mutex lock;
41 
42 	/**
43 	 * dma_addr contains the buffer DMA address. It is valid for
44 	 *
45 	 * - buffers allocated through the DMA mapping API (with the
46 	 *   OMAP_BO_MEM_DMA_API flag set)
47 	 *
48 	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
49 	 *   if they are physically contiguous (when sgt->orig_nents == 1)
50 	 *
51 	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
52 	 *   which case the DMA address points to the TILER aperture
53 	 *
54 	 * Physically contiguous buffers have their DMA address equal to the
55 	 * physical address as we don't remap those buffers through the TILER.
56 	 *
57 	 * Buffers mapped to the TILER have their DMA address pointing to the
58 	 * TILER aperture. As TILER mappings are refcounted (through
59 	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
60 	 * to ensure that the mapping won't disappear unexpectedly. References
61 	 * must be released with omap_gem_unpin().
62 	 */
63 	dma_addr_t dma_addr;
64 
65 	/**
66 	 * # of users of dma_addr
67 	 */
68 	u32 dma_addr_cnt;
69 
70 	/**
71 	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
72 	 * flag is set and the sgt field is valid.
73 	 */
74 	struct sg_table *sgt;
75 
76 	/**
77 	 * tiler block used when buffer is remapped in DMM/TILER.
78 	 */
79 	struct tiler_block *block;
80 
81 	/**
82 	 * Array of backing pages, if allocated.  Note that pages are never
83 	 * allocated for buffers originally allocated from contiguous memory.
84 	 */
85 	struct page **pages;
86 
87 	/** addresses corresponding to pages in above array */
88 	dma_addr_t *dma_addrs;
89 
90 	/**
91 	 * Virtual address, if mapped.
92 	 */
93 	void *vaddr;
94 };
95 
96 #define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
97 
98 /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
99  * not necessarily pinned in TILER all the time, and (b) when they are,
100  * they are not necessarily page aligned, we reserve one or more small
101  * regions in each of the 2d containers to use as a user-GART where we
102  * can create a second page-aligned mapping of parts of the buffer
103  * being accessed from userspace.
104  *
105  * Note that we could optimize slightly when we know that multiple
106  * tiler containers are backed by the same PAT.. but I'll leave that
107  * for later..
108  */
109 #define NUM_USERGART_ENTRIES 2
110 struct omap_drm_usergart_entry {
111 	struct tiler_block *block;	/* the reserved tiler block */
112 	dma_addr_t dma_addr;
113 	struct drm_gem_object *obj;	/* the current pinned obj */
114 	pgoff_t obj_pgoff;		/* page offset of obj currently
115 					   mapped in */
116 };
117 
118 struct omap_drm_usergart {
119 	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
120 	int height;				/* height in rows */
121 	int height_shift;		/* ilog2(height in rows) */
122 	int slot_shift;			/* ilog2(width per slot) */
123 	int stride_pfn;			/* stride in pages */
124 	int last;				/* index of last used entry */
125 };
126 
127 /* -----------------------------------------------------------------------------
128  * Helpers
129  */
130 
131 /** get mmap offset */
132 u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
133 {
134 	struct drm_device *dev = obj->dev;
135 	int ret;
136 	size_t size;
137 
138 	/* Make it mmapable */
139 	size = omap_gem_mmap_size(obj);
140 	ret = drm_gem_create_mmap_offset_size(obj, size);
141 	if (ret) {
142 		dev_err(dev->dev, "could not allocate mmap offset\n");
143 		return 0;
144 	}
145 
146 	return drm_vma_node_offset_addr(&obj->vma_node);
147 }
148 
149 static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
150 {
151 	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
152 		return true;
153 
154 	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
155 		return true;
156 
157 	return false;
158 }
159 
160 /* -----------------------------------------------------------------------------
161  * Eviction
162  */
163 
164 static void omap_gem_evict_entry(struct drm_gem_object *obj,
165 		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
166 {
167 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
168 	struct omap_drm_private *priv = obj->dev->dev_private;
169 	int n = priv->usergart[fmt].height;
170 	size_t size = PAGE_SIZE * n;
171 	loff_t off = omap_gem_mmap_offset(obj) +
172 			(entry->obj_pgoff << PAGE_SHIFT);
173 	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
174 
175 	if (m > 1) {
176 		int i;
177 		/* if stride is greater than PAGE_SIZE then sparse mapping: */
178 		for (i = n; i > 0; i--) {
179 			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
180 					    off, PAGE_SIZE, 1);
181 			off += PAGE_SIZE * m;
182 		}
183 	} else {
184 		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
185 				    off, size, 1);
186 	}
187 
188 	entry->obj = NULL;
189 }
190 
191 /* Evict a buffer from usergart, if it is mapped there */
192 static void omap_gem_evict(struct drm_gem_object *obj)
193 {
194 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
195 	struct omap_drm_private *priv = obj->dev->dev_private;
196 
197 	if (omap_obj->flags & OMAP_BO_TILED) {
198 		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
199 		int i;
200 
201 		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
202 			struct omap_drm_usergart_entry *entry =
203 				&priv->usergart[fmt].entry[i];
204 
205 			if (entry->obj == obj)
206 				omap_gem_evict_entry(obj, fmt, entry);
207 		}
208 	}
209 }
210 
211 /* -----------------------------------------------------------------------------
212  * Page Management
213  */
214 
215 /*
216  * Ensure backing pages are allocated. Must be called with the omap_obj.lock
217  * held.
218  */
219 static int omap_gem_attach_pages(struct drm_gem_object *obj)
220 {
221 	struct drm_device *dev = obj->dev;
222 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
223 	struct page **pages;
224 	int npages = obj->size >> PAGE_SHIFT;
225 	int i, ret;
226 	dma_addr_t *addrs;
227 
228 	lockdep_assert_held(&omap_obj->lock);
229 
230 	/*
231 	 * If not using shmem (in which case backing pages don't need to be
232 	 * allocated) or if pages are already allocated we're done.
233 	 */
234 	if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
235 		return 0;
236 
237 	pages = drm_gem_get_pages(obj);
238 	if (IS_ERR(pages)) {
239 		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
240 		return PTR_ERR(pages);
241 	}
242 
243 	/* for non-cached buffers, ensure the new pages are clean because
244 	 * DSS, GPU, etc. are not cache coherent:
245 	 */
246 	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
247 		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
248 		if (!addrs) {
249 			ret = -ENOMEM;
250 			goto free_pages;
251 		}
252 
253 		for (i = 0; i < npages; i++) {
254 			addrs[i] = dma_map_page(dev->dev, pages[i],
255 					0, PAGE_SIZE, DMA_TO_DEVICE);
256 
257 			if (dma_mapping_error(dev->dev, addrs[i])) {
258 				dev_warn(dev->dev,
259 					"%s: failed to map page\n", __func__);
260 
261 				for (i = i - 1; i >= 0; --i) {
262 					dma_unmap_page(dev->dev, addrs[i],
263 						PAGE_SIZE, DMA_TO_DEVICE);
264 				}
265 
266 				ret = -ENOMEM;
267 				goto free_addrs;
268 			}
269 		}
270 	} else {
271 		addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
272 		if (!addrs) {
273 			ret = -ENOMEM;
274 			goto free_pages;
275 		}
276 	}
277 
278 	omap_obj->dma_addrs = addrs;
279 	omap_obj->pages = pages;
280 
281 	return 0;
282 
283 free_addrs:
284 	kfree(addrs);
285 free_pages:
286 	drm_gem_put_pages(obj, pages, true, false);
287 
288 	return ret;
289 }
290 
291 /* Release backing pages. Must be called with the omap_obj.lock held. */
292 static void omap_gem_detach_pages(struct drm_gem_object *obj)
293 {
294 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
295 	unsigned int npages = obj->size >> PAGE_SHIFT;
296 	unsigned int i;
297 
298 	lockdep_assert_held(&omap_obj->lock);
299 
300 	for (i = 0; i < npages; i++) {
301 		if (omap_obj->dma_addrs[i])
302 			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
303 				       PAGE_SIZE, DMA_TO_DEVICE);
304 	}
305 
306 	kfree(omap_obj->dma_addrs);
307 	omap_obj->dma_addrs = NULL;
308 
309 	drm_gem_put_pages(obj, omap_obj->pages, true, false);
310 	omap_obj->pages = NULL;
311 }
312 
313 /* get buffer flags */
314 u32 omap_gem_flags(struct drm_gem_object *obj)
315 {
316 	return to_omap_bo(obj)->flags;
317 }
318 
319 /** get mmap size */
320 size_t omap_gem_mmap_size(struct drm_gem_object *obj)
321 {
322 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
323 	size_t size = obj->size;
324 
325 	if (omap_obj->flags & OMAP_BO_TILED) {
326 		/* for tiled buffers, the virtual size has stride rounded up
327 		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
328 		 * 32kb later!).  But we don't back the entire buffer with
329 		 * pages, only the valid picture part.. so need to adjust for
330 		 * this in the size used to mmap and generate mmap offset
331 		 */
332 		size = tiler_vsize(gem2fmt(omap_obj->flags),
333 				omap_obj->width, omap_obj->height);
334 	}
335 
336 	return size;
337 }
338 
339 /* -----------------------------------------------------------------------------
340  * Fault Handling
341  */
342 
343 /* Normal handling for the case of faulting in non-tiled buffers */
344 static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
345 		struct vm_area_struct *vma, struct vm_fault *vmf)
346 {
347 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
348 	unsigned long pfn;
349 	pgoff_t pgoff;
350 
351 	/* We don't use vmf->pgoff since that has the fake offset: */
352 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
353 
354 	if (omap_obj->pages) {
355 		omap_gem_cpu_sync_page(obj, pgoff);
356 		pfn = page_to_pfn(omap_obj->pages[pgoff]);
357 	} else {
358 		BUG_ON(!omap_gem_is_contiguous(omap_obj));
359 		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
360 	}
361 
362 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
363 			pfn, pfn << PAGE_SHIFT);
364 
365 	return vmf_insert_mixed(vma, vmf->address,
366 			__pfn_to_pfn_t(pfn, PFN_DEV));
367 }
368 
369 /* Special handling for the case of faulting in 2d tiled buffers */
370 static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
371 		struct vm_area_struct *vma, struct vm_fault *vmf)
372 {
373 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
374 	struct omap_drm_private *priv = obj->dev->dev_private;
375 	struct omap_drm_usergart_entry *entry;
376 	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
377 	struct page *pages[64];  /* XXX is this too much to have on stack? */
378 	unsigned long pfn;
379 	pgoff_t pgoff, base_pgoff;
380 	unsigned long vaddr;
381 	int i, err, slots;
382 	vm_fault_t ret = VM_FAULT_NOPAGE;
383 
384 	/*
385 	 * Note the height of the slot is also equal to the number of pages
386 	 * that need to be mapped in to fill a 4kb-wide CPU page.  If the slot
387 	 * height is 64, then 64 pages fill a 4kb-wide by 64-row region.
388 	 */
389 	const int n = priv->usergart[fmt].height;
390 	const int n_shift = priv->usergart[fmt].height_shift;
391 
392 	/*
393 	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
394 	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
395 	 * into account in some of the math, so figure out virtual stride
396 	 * in pages
397 	 */
398 	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
399 
400 	/* We don't use vmf->pgoff since that has the fake offset: */
401 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
402 
403 	/*
404 	 * Actual address we start mapping at is rounded down to previous slot
405 	 * boundary in the y direction:
406 	 */
407 	base_pgoff = round_down(pgoff, m << n_shift);
408 
409 	/* figure out buffer width in slots */
410 	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
411 
412 	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
413 
414 	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
415 
416 	/* evict previous buffer using this usergart entry, if any: */
417 	if (entry->obj)
418 		omap_gem_evict_entry(entry->obj, fmt, entry);
419 
420 	entry->obj = obj;
421 	entry->obj_pgoff = base_pgoff;
422 
423 	/* now convert base_pgoff to phys offset from virt offset: */
424 	base_pgoff = (base_pgoff >> n_shift) * slots;
425 
426 	/* for wider-than 4k.. figure out which part of the slot-row we want: */
427 	if (m > 1) {
428 		int off = pgoff % m;
429 		entry->obj_pgoff += off;
430 		base_pgoff /= m;
431 		slots = min(slots - (off << n_shift), n);
432 		base_pgoff += off << n_shift;
433 		vaddr += off << PAGE_SHIFT;
434 	}
435 
436 	/*
437 	 * Map in pages. Beyond the valid pixel part of the buffer, we set
438 	 * pages[i] to NULL to get a dummy page mapped in.. if someone
439 	 * reads/writes it they will get random/undefined content, but at
440 	 * least it won't be corrupting whatever other random page used to
441 	 * be mapped in, or other undefined behavior.
442 	 */
443 	memcpy(pages, &omap_obj->pages[base_pgoff],
444 			sizeof(struct page *) * slots);
445 	memset(pages + slots, 0,
446 			sizeof(struct page *) * (n - slots));
447 
448 	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
449 	if (err) {
450 		ret = vmf_error(err);
451 		dev_err(obj->dev->dev, "failed to pin: %d\n", err);
452 		return ret;
453 	}
454 
455 	pfn = entry->dma_addr >> PAGE_SHIFT;
456 
457 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
458 			pfn, pfn << PAGE_SHIFT);
459 
460 	for (i = n; i > 0; i--) {
461 		ret = vmf_insert_mixed(vma,
462 			vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
463 		if (ret & VM_FAULT_ERROR)
464 			break;
465 		pfn += priv->usergart[fmt].stride_pfn;
466 		vaddr += PAGE_SIZE * m;
467 	}
468 
469 	/* simple round-robin: */
470 	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
471 				 % NUM_USERGART_ENTRIES;
472 
473 	return ret;
474 }
475 
476 /**
477  * omap_gem_fault		-	pagefault handler for GEM objects
478  * @vmf: fault detail
479  *
480  * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
481  * does most of the work for us including the actual map/unmap calls
482  * but we need to do the actual page work.
483  * but we need to do the actual page fault work ourselves.
484  * The VMA was set up by GEM. In doing so it also ensured that the
485  * vma->vm_private_data points to the GEM object that is backing this
486  * mapping.
487  */
488 vm_fault_t omap_gem_fault(struct vm_fault *vmf)
489 {
490 	struct vm_area_struct *vma = vmf->vma;
491 	struct drm_gem_object *obj = vma->vm_private_data;
492 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
493 	int err;
494 	vm_fault_t ret;
495 
496 	/* Make sure there is no parallel update on a fault, and that nothing
497 	 * is moved or removed from beneath our feet
498 	 */
499 	mutex_lock(&omap_obj->lock);
500 
501 	/* if a shmem backed object, make sure we have pages attached now */
502 	err = omap_gem_attach_pages(obj);
503 	if (err) {
504 		ret = vmf_error(err);
505 		goto fail;
506 	}
507 
508 	/* where should we do corresponding put_pages().. we are mapping
509 	 * the original page, rather than thru a GART, so we can't rely
510 	 * on eviction to trigger this.  But munmap() of all mappings should
511 	 * probably trigger put_pages()?
512 	 */
513 
514 	if (omap_obj->flags & OMAP_BO_TILED)
515 		ret = omap_gem_fault_2d(obj, vma, vmf);
516 	else
517 		ret = omap_gem_fault_1d(obj, vma, vmf);
518 
519 
520 fail:
521 	mutex_unlock(&omap_obj->lock);
522 	return ret;
523 }
524 
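/*
 * For reference, this handler is hooked up through the driver's
 * vm_operations_struct in omap_drv.c, roughly as follows (sketch only):
 *
 *	static const struct vm_operations_struct omap_gem_vm_ops = {
 *		.fault = omap_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 * drm_gem_mmap() installs these ops on the VMA, so the first CPU access
 * to each page of a GEM mapping faults into omap_gem_fault().
 */
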
525 /** We override mainly to fix up some of the vm mapping flags.. */
526 int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
527 {
528 	int ret;
529 
530 	ret = drm_gem_mmap(filp, vma);
531 	if (ret) {
532 		DBG("mmap failed: %d", ret);
533 		return ret;
534 	}
535 
536 	return omap_gem_mmap_obj(vma->vm_private_data, vma);
537 }
538 
539 int omap_gem_mmap_obj(struct drm_gem_object *obj,
540 		struct vm_area_struct *vma)
541 {
542 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
543 
544 	vma->vm_flags &= ~VM_PFNMAP;
545 	vma->vm_flags |= VM_MIXEDMAP;
546 
547 	if (omap_obj->flags & OMAP_BO_WC) {
548 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
549 	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
550 		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
551 	} else {
552 		/*
553 		 * We do have some private objects, at least for scanout buffers
554 		 * on hardware without DMM/TILER.  But these are allocated write-
555 		 * combine
556 		 */
557 		if (WARN_ON(!obj->filp))
558 			return -EINVAL;
559 
560 		/*
561 		 * Shunt off cached objs to shmem file so they have their own
562 		 * address_space (so unmap_mapping_range does what we want,
563 		 * in particular in the case of mmap'd dmabufs)
564 		 */
565 		fput(vma->vm_file);
566 		vma->vm_pgoff = 0;
567 		vma->vm_file  = get_file(obj->filp);
568 
569 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
570 	}
571 
572 	return 0;
573 }
574 
575 /* -----------------------------------------------------------------------------
576  * Dumb Buffers
577  */
578 
579 /**
580  * omap_gem_dumb_create	-	create a dumb buffer
581  * @file: our client file
582  * @dev: our device
583  * @args: the requested arguments copied from userspace
584  *
585  * Allocate a buffer suitable for use as a frame buffer of the
586  * form described by user space. Give userspace a handle by which
587  * to reference it.
588  */
589 int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
590 		struct drm_mode_create_dumb *args)
591 {
592 	union omap_gem_size gsize;
593 
594 	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
595 
596 	args->size = PAGE_ALIGN(args->pitch * args->height);
597 
598 	gsize = (union omap_gem_size){
599 		.bytes = args->size,
600 	};
601 
602 	return omap_gem_new_handle(dev, file, gsize,
603 			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
604 }
605 
606 /**
607  * omap_gem_dumb_map_offset	-	buffer mapping for dumb interface
608  * @file: our drm client file
609  * @dev: drm device
610  * @handle: GEM handle to the object (from dumb_create)
611  *
612  * Do the necessary setup to allow the mapping of the frame buffer
613  * into user memory. We don't have to do much here at the moment.
614  */
615 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
616 		u32 handle, u64 *offset)
617 {
618 	struct drm_gem_object *obj;
619 	int ret = 0;
620 
621 	/* GEM does all our handle to object mapping */
622 	obj = drm_gem_object_lookup(file, handle);
623 	if (obj == NULL) {
624 		ret = -ENOENT;
625 		goto fail;
626 	}
627 
628 	*offset = omap_gem_mmap_offset(obj);
629 
630 	drm_gem_object_put_unlocked(obj);
631 
632 fail:
633 	return ret;
634 }
635 
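/*
 * For illustration, a minimal userspace sequence that exercises the two
 * dumb-buffer entry points above (sketch only; error handling is omitted
 * and the numeric sizes are illustrative):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1280, .height = 720, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { };
 *	void *ptr;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 *
 * DRM_IOCTL_MODE_MAP_DUMB resolves to omap_gem_dumb_map_offset(), and
 * faults on the resulting mapping are served by omap_gem_fault() above.
 */
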
636 #ifdef CONFIG_DRM_FBDEV_EMULATION
637 /* Set scrolling position.  This allows us to implement fast scrolling
638  * for console.
639  *
640  * Call only from non-atomic contexts.
641  */
642 int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
643 {
644 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
645 	u32 npages = obj->size >> PAGE_SHIFT;
646 	int ret = 0;
647 
648 	if (roll > npages) {
649 		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
650 		return -EINVAL;
651 	}
652 
653 	omap_obj->roll = roll;
654 
655 	mutex_lock(&omap_obj->lock);
656 
657 	/* if we aren't mapped yet, we don't need to do anything */
658 	if (omap_obj->block) {
659 		ret = omap_gem_attach_pages(obj);
660 		if (ret)
661 			goto fail;
662 
663 		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
664 				roll, true);
665 		if (ret)
666 			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
667 	}
668 
669 fail:
670 	mutex_unlock(&omap_obj->lock);
671 
672 	return ret;
673 }
674 #endif
675 
676 /* -----------------------------------------------------------------------------
677  * Memory Management & DMA Sync
678  */
679 
680 /*
681  * shmem buffers that are mapped cached are not coherent.
682  *
683  * We keep track of dirty pages using page faulting to perform cache management.
684  * When a page is mapped to the CPU in read/write mode the device can't access
685  * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
686  * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
687  * unmapped from the CPU.
688  */
689 static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
690 {
691 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
692 
693 	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
694 		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
695 }
696 
697 /* Sync the buffer for CPU access.. note pages should already be
698  * attached, ie. omap_gem_get_pages()
699  */
700 void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
701 {
702 	struct drm_device *dev = obj->dev;
703 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
704 
705 	if (omap_gem_is_cached_coherent(obj))
706 		return;
707 
708 	if (omap_obj->dma_addrs[pgoff]) {
709 		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
710 				PAGE_SIZE, DMA_TO_DEVICE);
711 		omap_obj->dma_addrs[pgoff] = 0;
712 	}
713 }
714 
715 /* sync the buffer for DMA access */
716 void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
717 		enum dma_data_direction dir)
718 {
719 	struct drm_device *dev = obj->dev;
720 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
721 	int i, npages = obj->size >> PAGE_SHIFT;
722 	struct page **pages = omap_obj->pages;
723 	bool dirty = false;
724 
725 	if (omap_gem_is_cached_coherent(obj))
726 		return;
727 
728 	for (i = 0; i < npages; i++) {
729 		if (!omap_obj->dma_addrs[i]) {
730 			dma_addr_t addr;
731 
732 			addr = dma_map_page(dev->dev, pages[i], 0,
733 					    PAGE_SIZE, dir);
734 			if (dma_mapping_error(dev->dev, addr)) {
735 				dev_warn(dev->dev, "%s: failed to map page\n",
736 					__func__);
737 				break;
738 			}
739 
740 			dirty = true;
741 			omap_obj->dma_addrs[i] = addr;
742 		}
743 	}
744 
745 	if (dirty) {
746 		unmap_mapping_range(obj->filp->f_mapping, 0,
747 				    omap_gem_mmap_size(obj), 1);
748 	}
749 }
750 
751 /**
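/*
 * A minimal sketch of how a kernel-side user would pair the helpers above
 * for DMA to a cached, shmem-backed object. The function name is
 * illustrative only and is not part of the driver API.
 */
static __maybe_unused int omap_gem_example_dma_to_buffer(struct drm_gem_object *obj)
{
	struct page **pages;
	int ret;

	/* Make sure backing pages are attached and get hold of them. */
	ret = omap_gem_get_pages(obj, &pages, true);
	if (ret)
		return ret;

	/* Map any CPU-dirty pages to the device and zap the CPU mappings. */
	omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE);

	/* ... perform the DMA using the pages or a TILER mapping ... */

	/* Later CPU faults call omap_gem_cpu_sync_page() per touched page. */
	return omap_gem_put_pages(obj);
}
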
752  * omap_gem_pin() - Pin a GEM object in memory
753  * @obj: the GEM object
754  * @dma_addr: the DMA address
755  *
756  * Pin the given GEM object in memory and fill the dma_addr pointer with the
757  * object's DMA address. If the buffer is not physically contiguous it will be
758  * remapped through the TILER to provide a contiguous view.
759  *
760  * Pins are reference-counted; calling this function multiple times is allowed
761  * as long as the corresponding omap_gem_unpin() calls are balanced.
762  *
763  * Return 0 on success or a negative error code otherwise.
764  */
765 int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
766 {
767 	struct omap_drm_private *priv = obj->dev->dev_private;
768 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
769 	int ret = 0;
770 
771 	mutex_lock(&omap_obj->lock);
772 
773 	if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
774 		if (omap_obj->dma_addr_cnt == 0) {
775 			u32 npages = obj->size >> PAGE_SHIFT;
776 			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
777 			struct tiler_block *block;
778 
779 			BUG_ON(omap_obj->block);
780 
781 			ret = omap_gem_attach_pages(obj);
782 			if (ret)
783 				goto fail;
784 
785 			if (omap_obj->flags & OMAP_BO_TILED) {
786 				block = tiler_reserve_2d(fmt,
787 						omap_obj->width,
788 						omap_obj->height, 0);
789 			} else {
790 				block = tiler_reserve_1d(obj->size);
791 			}
792 
793 			if (IS_ERR(block)) {
794 				ret = PTR_ERR(block);
795 				dev_err(obj->dev->dev,
796 					"could not remap: %d (%d)\n", ret, fmt);
797 				goto fail;
798 			}
799 
800 			/* TODO: enable async refill.. */
801 			ret = tiler_pin(block, omap_obj->pages, npages,
802 					omap_obj->roll, true);
803 			if (ret) {
804 				tiler_release(block);
805 				dev_err(obj->dev->dev,
806 						"could not pin: %d\n", ret);
807 				goto fail;
808 			}
809 
810 			omap_obj->dma_addr = tiler_ssptr(block);
811 			omap_obj->block = block;
812 
813 			DBG("got dma address: %pad", &omap_obj->dma_addr);
814 		}
815 
816 		omap_obj->dma_addr_cnt++;
817 
818 		*dma_addr = omap_obj->dma_addr;
819 	} else if (omap_gem_is_contiguous(omap_obj)) {
820 		*dma_addr = omap_obj->dma_addr;
821 	} else {
822 		ret = -EINVAL;
823 		goto fail;
824 	}
825 
826 fail:
827 	mutex_unlock(&omap_obj->lock);
828 
829 	return ret;
830 }
831 
832 /**
833  * omap_gem_unpin() - Unpin a GEM object from memory
834  * @obj: the GEM object
835  *
836  * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
837  * reference-counted; the actual unpin will only be performed when the number
838  * of calls to this function matches the number of calls to omap_gem_pin().
839  */
840 void omap_gem_unpin(struct drm_gem_object *obj)
841 {
842 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
843 	int ret;
844 
845 	mutex_lock(&omap_obj->lock);
846 
847 	if (omap_obj->dma_addr_cnt > 0) {
848 		omap_obj->dma_addr_cnt--;
849 		if (omap_obj->dma_addr_cnt == 0) {
850 			ret = tiler_unpin(omap_obj->block);
851 			if (ret) {
852 				dev_err(obj->dev->dev,
853 					"could not unpin pages: %d\n", ret);
854 			}
855 			ret = tiler_release(omap_obj->block);
856 			if (ret) {
857 				dev_err(obj->dev->dev,
858 					"could not release unmap: %d\n", ret);
859 			}
860 			omap_obj->dma_addr = 0;
861 			omap_obj->block = NULL;
862 		}
863 	}
864 
865 	mutex_unlock(&omap_obj->lock);
866 }
867 
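/*
 * A minimal sketch of a scanout-style user of the pin API above. The
 * function name is illustrative only; the real callers live in the
 * omapdrm framebuffer/plane code.
 */
static __maybe_unused int omap_gem_example_scanout(struct drm_gem_object *obj)
{
	dma_addr_t dma_addr;
	int ret;

	/*
	 * Contiguous buffers return their physical address; others get a
	 * refcounted TILER mapping set up on the first pin.
	 */
	ret = omap_gem_pin(obj, &dma_addr);
	if (ret)
		return ret;

	/* ... program the display hardware with dma_addr ... */

	/* Balance the pin once the hardware no longer scans the buffer out. */
	omap_gem_unpin(obj);

	return 0;
}
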
868 /* Get rotated scanout address (only valid if already pinned), at the
869  * specified orientation and x,y offset from top-left corner of buffer
870  * (only valid for tiled 2d buffers)
871  */
872 int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
873 		int x, int y, dma_addr_t *dma_addr)
874 {
875 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
876 	int ret = -EINVAL;
877 
878 	mutex_lock(&omap_obj->lock);
879 
880 	if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
881 			(omap_obj->flags & OMAP_BO_TILED)) {
882 		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
883 		ret = 0;
884 	}
885 
886 	mutex_unlock(&omap_obj->lock);
887 
888 	return ret;
889 }
890 
891 /* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
892 int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
893 {
894 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
895 	int ret = -EINVAL;
896 	if (omap_obj->flags & OMAP_BO_TILED)
897 		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
898 	return ret;
899 }
900 
901 /* if !remap, and we don't have pages backing, then fail, rather than
902  * increasing the pin count (which we don't really do yet anyways,
903  * because we don't support swapping pages back out).  And 'remap'
904  * might not be quite the right name, but I wanted to keep it working
905  * similarly to omap_gem_pin().  Note that the omap_obj mutex is
906  * acquired in both the remap and !remap cases, so this must not be
907  * called from atomic context.  If !remap, a matching
908  * omap_gem_put_pages() call is not required (and should not be
909  * made).
910  */
911 int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
912 		bool remap)
913 {
914 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
915 	int ret = 0;
916 
917 	mutex_lock(&omap_obj->lock);
918 
919 	if (remap) {
920 		ret = omap_gem_attach_pages(obj);
921 		if (ret)
922 			goto unlock;
923 	}
924 
925 	if (!omap_obj->pages) {
926 		ret = -ENOMEM;
927 		goto unlock;
928 	}
929 
930 	*pages = omap_obj->pages;
931 
932 unlock:
933 	mutex_unlock(&omap_obj->lock);
934 
935 	return ret;
936 }
937 
938 /* release pages when DMA no longer being performed */
939 int omap_gem_put_pages(struct drm_gem_object *obj)
940 {
941 	/* do something here if we dynamically attach/detach pages.. at
942 	 * least they would no longer need to be pinned if everyone has
943 	 * released the pages..
944 	 */
945 	return 0;
946 }
947 
948 #ifdef CONFIG_DRM_FBDEV_EMULATION
949 /*
950  * Get kernel virtual address for CPU access.. this more or less only
951  * exists for omap_fbdev.
952  */
953 void *omap_gem_vaddr(struct drm_gem_object *obj)
954 {
955 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
956 	void *vaddr;
957 	int ret;
958 
959 	mutex_lock(&omap_obj->lock);
960 
961 	if (!omap_obj->vaddr) {
962 		ret = omap_gem_attach_pages(obj);
963 		if (ret) {
964 			vaddr = ERR_PTR(ret);
965 			goto unlock;
966 		}
967 
968 		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
969 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
970 	}
971 
972 	vaddr = omap_obj->vaddr;
973 
974 unlock:
975 	mutex_unlock(&omap_obj->lock);
976 	return vaddr;
977 }
978 #endif
979 
980 /* -----------------------------------------------------------------------------
981  * Power Management
982  */
983 
984 #ifdef CONFIG_PM
985 /* re-pin objects in DMM in resume path: */
986 int omap_gem_resume(struct drm_device *dev)
987 {
988 	struct omap_drm_private *priv = dev->dev_private;
989 	struct omap_gem_object *omap_obj;
990 	int ret = 0;
991 
992 	mutex_lock(&priv->list_lock);
993 	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
994 		if (omap_obj->block) {
995 			struct drm_gem_object *obj = &omap_obj->base;
996 			u32 npages = obj->size >> PAGE_SHIFT;
997 
998 			WARN_ON(!omap_obj->pages);  /* this can't happen */
999 			ret = tiler_pin(omap_obj->block,
1000 					omap_obj->pages, npages,
1001 					omap_obj->roll, true);
1002 			if (ret) {
1003 				dev_err(dev->dev, "could not repin: %d\n", ret);
1004 				goto done;
1005 			}
1006 		}
1007 	}
1008 
1009 done:
1010 	mutex_unlock(&priv->list_lock);
1011 	return ret;
1012 }
1013 #endif
1014 
1015 /* -----------------------------------------------------------------------------
1016  * DebugFS
1017  */
1018 
1019 #ifdef CONFIG_DEBUG_FS
1020 void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1021 {
1022 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1023 	u64 off;
1024 
1025 	off = drm_vma_node_start(&obj->vma_node);
1026 
1027 	mutex_lock(&omap_obj->lock);
1028 
1029 	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
1030 			omap_obj->flags, obj->name, kref_read(&obj->refcount),
1031 			off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
1032 			omap_obj->vaddr, omap_obj->roll);
1033 
1034 	if (omap_obj->flags & OMAP_BO_TILED) {
1035 		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
1036 		if (omap_obj->block) {
1037 			struct tcm_area *area = &omap_obj->block->area;
1038 			seq_printf(m, " (%dx%d, %dx%d)",
1039 					area->p0.x, area->p0.y,
1040 					area->p1.x, area->p1.y);
1041 		}
1042 	} else {
1043 		seq_printf(m, " %zu", obj->size);
1044 	}
1045 
1046 	mutex_unlock(&omap_obj->lock);
1047 
1048 	seq_printf(m, "\n");
1049 }
1050 
1051 void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1052 {
1053 	struct omap_gem_object *omap_obj;
1054 	int count = 0;
1055 	size_t size = 0;
1056 
1057 	list_for_each_entry(omap_obj, list, mm_list) {
1058 		struct drm_gem_object *obj = &omap_obj->base;
1059 		seq_printf(m, "   ");
1060 		omap_gem_describe(obj, m);
1061 		count++;
1062 		size += obj->size;
1063 	}
1064 
1065 	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1066 }
1067 #endif
1068 
1069 /* -----------------------------------------------------------------------------
1070  * Constructor & Destructor
1071  */
1072 
1073 void omap_gem_free_object(struct drm_gem_object *obj)
1074 {
1075 	struct drm_device *dev = obj->dev;
1076 	struct omap_drm_private *priv = dev->dev_private;
1077 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1078 
1079 	omap_gem_evict(obj);
1080 
1081 	mutex_lock(&priv->list_lock);
1082 	list_del(&omap_obj->mm_list);
1083 	mutex_unlock(&priv->list_lock);
1084 
1085 	/*
1086 	 * We own the sole reference to the object at this point, but to keep
1087 	 * lockdep happy, we must still take omap_obj->lock to call
1088 	 * omap_gem_detach_pages(). This should hardly make any difference as
1089 	 * there can't be any lock contention.
1090 	 */
1091 	mutex_lock(&omap_obj->lock);
1092 
1093 	/* The object should not be pinned. */
1094 	WARN_ON(omap_obj->dma_addr_cnt > 0);
1095 
1096 	if (omap_obj->pages) {
1097 		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
1098 			kfree(omap_obj->pages);
1099 		else
1100 			omap_gem_detach_pages(obj);
1101 	}
1102 
1103 	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
1104 		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
1105 			    omap_obj->dma_addr);
1106 	} else if (omap_obj->vaddr) {
1107 		vunmap(omap_obj->vaddr);
1108 	} else if (obj->import_attach) {
1109 		drm_prime_gem_destroy(obj, omap_obj->sgt);
1110 	}
1111 
1112 	mutex_unlock(&omap_obj->lock);
1113 
1114 	drm_gem_object_release(obj);
1115 
1116 	mutex_destroy(&omap_obj->lock);
1117 
1118 	kfree(omap_obj);
1119 }
1120 
1121 /* GEM buffer object constructor */
1122 struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1123 		union omap_gem_size gsize, u32 flags)
1124 {
1125 	struct omap_drm_private *priv = dev->dev_private;
1126 	struct omap_gem_object *omap_obj;
1127 	struct drm_gem_object *obj;
1128 	struct address_space *mapping;
1129 	size_t size;
1130 	int ret;
1131 
1132 	/* Validate the flags and compute the memory and cache flags. */
1133 	if (flags & OMAP_BO_TILED) {
1134 		if (!priv->usergart) {
1135 			dev_err(dev->dev, "Tiled buffers require DMM\n");
1136 			return NULL;
1137 		}
1138 
1139 		/*
1140 		 * Tiled buffers are always backed by shmem pages. When they are
1141 		 * scanned out, they are remapped into DMM/TILER.
1142 		 */
1143 		flags &= ~OMAP_BO_SCANOUT;
1144 		flags |= OMAP_BO_MEM_SHMEM;
1145 
1146 		/*
1147 		 * Currently don't allow cached buffers. There is some caching
1148 		 * stuff that needs to be handled better.
1149 		 */
1150 		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1151 		flags |= tiler_get_cpu_cache_flags();
1152 	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1153 		/*
1154 		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
1155 		 * tiled. However, to lower the pressure on memory allocation,
1156 		 * use contiguous memory only if no TILER is available.
1157 		 */
1158 		flags |= OMAP_BO_MEM_DMA_API;
1159 	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
1160 		/*
1161 		 * All other buffers not backed by dma_buf are shmem-backed.
1162 		 */
1163 		flags |= OMAP_BO_MEM_SHMEM;
1164 	}
1165 
1166 	/* Allocate and initialize the OMAP GEM object. */
1167 	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1168 	if (!omap_obj)
1169 		return NULL;
1170 
1171 	obj = &omap_obj->base;
1172 	omap_obj->flags = flags;
1173 	mutex_init(&omap_obj->lock);
1174 
1175 	if (flags & OMAP_BO_TILED) {
1176 		/*
1177 		 * For tiled buffers align dimensions to slot boundaries and
1178 		 * calculate size based on aligned dimensions.
1179 		 */
1180 		tiler_align(gem2fmt(flags), &gsize.tiled.width,
1181 			    &gsize.tiled.height);
1182 
1183 		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1184 				  gsize.tiled.height);
1185 
1186 		omap_obj->width = gsize.tiled.width;
1187 		omap_obj->height = gsize.tiled.height;
1188 	} else {
1189 		size = PAGE_ALIGN(gsize.bytes);
1190 	}
1191 
1192 	/* Initialize the GEM object. */
1193 	if (!(flags & OMAP_BO_MEM_SHMEM)) {
1194 		drm_gem_private_object_init(dev, obj, size);
1195 	} else {
1196 		ret = drm_gem_object_init(dev, obj, size);
1197 		if (ret)
1198 			goto err_free;
1199 
1200 		mapping = obj->filp->f_mapping;
1201 		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1202 	}
1203 
1204 	/* Allocate memory if needed. */
1205 	if (flags & OMAP_BO_MEM_DMA_API) {
1206 		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
1207 					       &omap_obj->dma_addr,
1208 					       GFP_KERNEL);
1209 		if (!omap_obj->vaddr)
1210 			goto err_release;
1211 	}
1212 
1213 	mutex_lock(&priv->list_lock);
1214 	list_add(&omap_obj->mm_list, &priv->obj_list);
1215 	mutex_unlock(&priv->list_lock);
1216 
1217 	return obj;
1218 
1219 err_release:
1220 	drm_gem_object_release(obj);
1221 err_free:
1222 	kfree(omap_obj);
1223 	return NULL;
1224 }
1225 
1226 struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1227 					   struct sg_table *sgt)
1228 {
1229 	struct omap_drm_private *priv = dev->dev_private;
1230 	struct omap_gem_object *omap_obj;
1231 	struct drm_gem_object *obj;
1232 	union omap_gem_size gsize;
1233 
1234 	/* Without a DMM, only physically contiguous buffers can be supported. */
1235 	if (sgt->orig_nents != 1 && !priv->has_dmm)
1236 		return ERR_PTR(-EINVAL);
1237 
1238 	gsize.bytes = PAGE_ALIGN(size);
1239 	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
1240 	if (!obj)
1241 		return ERR_PTR(-ENOMEM);
1242 
1243 	omap_obj = to_omap_bo(obj);
1244 
1245 	mutex_lock(&omap_obj->lock);
1246 
1247 	omap_obj->sgt = sgt;
1248 
1249 	if (sgt->orig_nents == 1) {
1250 		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
1251 	} else {
1252 		/* Create pages list from sgt */
1253 		struct sg_page_iter iter;
1254 		struct page **pages;
1255 		unsigned int npages;
1256 		unsigned int i = 0;
1257 
1258 		npages = DIV_ROUND_UP(size, PAGE_SIZE);
1259 		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1260 		if (!pages) {
1261 			omap_gem_free_object(obj);
1262 			obj = ERR_PTR(-ENOMEM);
1263 			goto done;
1264 		}
1265 
1266 		omap_obj->pages = pages;
1267 
1268 		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
1269 			pages[i++] = sg_page_iter_page(&iter);
1270 			if (i > npages)
1271 				break;
1272 		}
1273 
1274 		if (WARN_ON(i != npages)) {
1275 			omap_gem_free_object(obj);
1276 			obj = ERR_PTR(-ENOMEM);
1277 			goto done;
1278 		}
1279 	}
1280 
1281 done:
1282 	mutex_unlock(&omap_obj->lock);
1283 	return obj;
1284 }
1285 
1286 /* convenience method to construct a GEM buffer object, and userspace handle */
1287 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1288 		union omap_gem_size gsize, u32 flags, u32 *handle)
1289 {
1290 	struct drm_gem_object *obj;
1291 	int ret;
1292 
1293 	obj = omap_gem_new(dev, gsize, flags);
1294 	if (!obj)
1295 		return -ENOMEM;
1296 
1297 	ret = drm_gem_handle_create(file, obj, handle);
1298 	if (ret) {
1299 		omap_gem_free_object(obj);
1300 		return ret;
1301 	}
1302 
1303 	/* drop reference from allocate - handle holds it now */
1304 	drm_gem_object_put_unlocked(obj);
1305 
1306 	return 0;
1307 }
1308 
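/*
 * A minimal sketch of allocating a 16bpp tiled object from kernel code
 * with the constructor above. The function name and dimensions are
 * illustrative only; omap_gem_new() aligns the pixel dimensions to TILER
 * slot boundaries internally and returns NULL on failure.
 */
static __maybe_unused struct drm_gem_object *
omap_gem_example_new_tiled(struct drm_device *dev)
{
	union omap_gem_size gsize = {
		.tiled = { .width = 1280, .height = 720 },
	};

	/* Tiled objects require DMM; omap_gem_new() checks priv->usergart. */
	return omap_gem_new(dev, gsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
}
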
1309 /* -----------------------------------------------------------------------------
1310  * Init & Cleanup
1311  */
1312 
1313 /* If DMM is used, we need to set some stuff up.. */
1314 void omap_gem_init(struct drm_device *dev)
1315 {
1316 	struct omap_drm_private *priv = dev->dev_private;
1317 	struct omap_drm_usergart *usergart;
1318 	const enum tiler_fmt fmts[] = {
1319 			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1320 	};
1321 	int i, j;
1322 
1323 	if (!dmm_is_available()) {
1324 		/* DMM only supported on OMAP4 and later, so this isn't fatal */
1325 		dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
1326 		return;
1327 	}
1328 
1329 	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
1330 	if (!usergart)
1331 		return;
1332 
1333 	/* reserve 4k aligned/wide regions for userspace mappings: */
1334 	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1335 		u16 h = 1, w = PAGE_SIZE >> i;
1336 
1337 		tiler_align(fmts[i], &w, &h);
1338 		/* note: since each region is one 4kb page wide and has the
1339 		 * minimum number of rows, the height ends up being the same
1340 		 * as the number of pages in the region
1341 		 */
1342 		usergart[i].height = h;
1343 		usergart[i].height_shift = ilog2(h);
1344 		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1345 		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1346 		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1347 			struct omap_drm_usergart_entry *entry;
1348 			struct tiler_block *block;
1349 
1350 			entry = &usergart[i].entry[j];
1351 			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
1352 			if (IS_ERR(block)) {
1353 				dev_err(dev->dev,
1354 						"reserve failed: %d, %d, %ld\n",
1355 						i, j, PTR_ERR(block));
1356 				return;
1357 			}
1358 			entry->dma_addr = tiler_ssptr(block);
1359 			entry->block = block;
1360 
1361 			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
1362 					&entry->dma_addr,
1363 					usergart[i].stride_pfn << PAGE_SHIFT);
1364 		}
1365 	}
1366 
1367 	priv->usergart = usergart;
1368 	priv->has_dmm = true;
1369 }
1370 
1371 void omap_gem_deinit(struct drm_device *dev)
1372 {
1373 	struct omap_drm_private *priv = dev->dev_private;
1374 
1375 	/* I believe we can rely on there being no more outstanding GEM
1376 	 * objects which could depend on usergart/dmm at this point.
1377 	 */
1378 	kfree(priv->usergart);
1379 }
1380