/*
 * DMM IOMMU driver support functions for TI OMAP processors.
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob@ti.com>
 *         Andy Gross <andy.gross@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h> /* platform_device() */
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"

#define DMM_DRIVER_NAME "dmm"

/* mappings for associating views to luts */
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;

#if defined(CONFIG_OF)
static const struct of_device_id dmm_of_match[];
#endif

/* global spinlock for protecting lists */
static DEFINE_SPINLOCK(list_lock);

/* Geometry table */
#define GEOM(xshift, yshift, bytes_per_pixel) { \
		.x_shft = (xshift), \
		.y_shft = (yshift), \
		.cpp    = (bytes_per_pixel), \
		.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
		.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
	}

static const struct {
	u32 x_shft;	/* unused X-bits (as part of bpp) */
	u32 y_shft;	/* unused Y-bits (as part of bpp) */
	u32 cpp;	/* bytes/chars per pixel */
	u32 slot_w;	/* width of each slot (in pixels) */
	u32 slot_h;	/* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
	[TILFMT_8BIT]  = GEOM(0, 0, 1),
	[TILFMT_16BIT] = GEOM(0, 1, 2),
	[TILFMT_32BIT] = GEOM(1, 1, 4),
	[TILFMT_PAGE]  = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
};
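
/*
 * Worked example, assuming SLOT_WIDTH_BITS == SLOT_HEIGHT_BITS == 6 (the
 * values in omap_dmm_priv.h): each slot then maps exactly one 4 KiB page,
 * and the per-slot pixel dimensions work out to:
 *
 *   TILFMT_8BIT  (1 byte/pixel) : 64x64 pixels
 *   TILFMT_16BIT (2 bytes/pixel): 64x32 pixels
 *   TILFMT_32BIT (4 bytes/pixel): 32x32 pixels
 *   TILFMT_PAGE  (linear)       : 1x1, i.e. one page per slot
 */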

/* lookup table for registers w/ per-engine instances */
static const u32 reg[][4] = {
	[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
			DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
	[PAT_DESCR]  = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
			DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};

static u32 dmm_read(struct dmm *dmm, u32 reg)
{
	return readl(dmm->base + reg);
}

static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
{
	writel(val, dmm->base + reg);
}

/* simple allocator to grab next 16 byte aligned memory from txn */
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
{
	void *ptr;
	struct refill_engine *engine = txn->engine_handle;

	/* dmm programming requires 16 byte aligned addresses */
	txn->current_pa = round_up(txn->current_pa, 16);
	txn->current_va = (void *)round_up((long)txn->current_va, 16);

	ptr = txn->current_va;
	*pa = txn->current_pa;

	txn->current_pa += sz;
	txn->current_va += sz;

	BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);

	return ptr;
}
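
/*
 * Note that alloc_dma() is a simple bump allocator over the per-engine
 * refill buffer: nothing is freed individually, the space is reclaimed
 * as a whole when dmm_txn_init() resets current_va/current_pa to the
 * start of the buffer. A typical allocation pair (a sketch, mirroring
 * dmm_txn_append() below) looks like:
 *
 *   pat  = alloc_dma(txn, sizeof(*pat), &pat_pa);
 *   data = alloc_dma(txn, 4 * nslots, &data_pa);
 *
 * Both returned addresses are 16-byte aligned, as the PAT hardware
 * requires.
 */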

/* check status and spin until wait_mask comes true */
static int wait_status(struct refill_engine *engine, u32 wait_mask)
{
	struct dmm *dmm = engine->dmm;
	u32 r = 0, err, i;

	i = DMM_FIXED_RETRY_COUNT;
	while (true) {
		r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
		err = r & DMM_PATSTATUS_ERR;
		if (err) {
			dev_err(dmm->dev,
				"%s: error (engine%d). PAT_STATUS: 0x%08x\n",
				__func__, engine->id, r);
			return -EFAULT;
		}

		if ((r & wait_mask) == wait_mask)
			break;

		if (--i == 0) {
			dev_err(dmm->dev,
				"%s: timeout (engine%d). PAT_STATUS: 0x%08x\n",
				__func__, engine->id, r);
			return -ETIMEDOUT;
		}

		udelay(1);
	}

	return 0;
}

static void release_engine(struct refill_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_add(&engine->idle_node, &omap_dmm->idle_head);
	spin_unlock_irqrestore(&list_lock, flags);

	atomic_inc(&omap_dmm->engine_counter);
	wake_up_interruptible(&omap_dmm->engine_queue);
}

static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
	struct dmm *dmm = arg;
	u32 status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
	int i;

	/* ack IRQ */
	dmm_write(dmm, status, DMM_PAT_IRQSTATUS);

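	/*
	 * Each refill engine owns one byte of the IRQ status register:
	 * engine 0 reports in bits [7:0], engine 1 in bits [15:8], and
	 * so on, hence the status >>= 8 at the end of each iteration.
	 */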
	for (i = 0; i < dmm->num_engines; i++) {
		if (status & DMM_IRQSTAT_ERR_MASK)
			dev_err(dmm->dev,
				"irq error(engine%d): IRQSTAT 0x%02x\n",
				i, status & 0xff);

		if (status & DMM_IRQSTAT_LST) {
			if (dmm->engines[i].async)
				release_engine(&dmm->engines[i]);

			complete(&dmm->engines[i].compl);
		}

		status >>= 8;
	}

	return IRQ_HANDLED;
}

/**
 * Get a handle for a DMM transaction
 */
static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
{
	struct dmm_txn *txn = NULL;
	struct refill_engine *engine = NULL;
	int ret;
	unsigned long flags;

	/* wait until an engine is available */
	ret = wait_event_interruptible(omap_dmm->engine_queue,
		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
	if (ret)
		return ERR_PTR(ret);

	/* grab an idle engine */
	spin_lock_irqsave(&list_lock, flags);
	if (!list_empty(&dmm->idle_head)) {
		engine = list_entry(dmm->idle_head.next, struct refill_engine,
					idle_node);
		list_del(&engine->idle_node);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	BUG_ON(!engine);

	txn = &engine->txn;
	engine->tcm = tcm;
	txn->engine_handle = engine;
	txn->last_pat = NULL;
	txn->current_va = engine->refill_va;
	txn->current_pa = engine->refill_pa;

	return txn;
}
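
/*
 * A transaction runs in three steps; fill() below is the real caller,
 * but a minimal sketch of the sequence is:
 *
 *   txn = dmm_txn_init(omap_dmm, area->tcm);
 *   dmm_txn_append(txn, &p_area, pages, npages, roll);
 *   ret = dmm_txn_commit(txn, wait);
 *
 * There is no explicit "free" step: the commit path (or the IRQ handler,
 * for async commits) returns the engine to the idle list.
 */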

/**
 * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
 * corresponding slot is cleared (ie. dummy_pa is programmed)
 */
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
		struct page **pages, u32 npages, u32 roll)
{
	dma_addr_t pat_pa = 0, data_pa = 0;
	u32 *data;
	struct pat *pat;
	struct refill_engine *engine = txn->engine_handle;
	int columns = (1 + area->x1 - area->x0);
	int rows = (1 + area->y1 - area->y0);
	int i = columns*rows;

	pat = alloc_dma(txn, sizeof(*pat), &pat_pa);

	if (txn->last_pat)
		txn->last_pat->next_pa = (u32)pat_pa;

	pat->area = *area;

	/* adjust Y coordinates based off of container parameters */
	pat->area.y0 += engine->tcm->y_offset;
	pat->area.y1 += engine->tcm->y_offset;

	pat->ctrl = (struct pat_ctrl){
			.start = 1,
			.lut_id = engine->tcm->lut_id,
		};

	data = alloc_dma(txn, 4*i, &data_pa);
	/* FIXME: what if data_pa is more than 32-bit ? */
	pat->data_pa = data_pa;

	while (i--) {
		int n = i + roll;
		if (n >= npages)
			n -= npages;
		data[i] = (pages && pages[n]) ?
			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
	}

	txn->last_pat = pat;
}
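
/*
 * Each append consumes one PAT descriptor plus one u32 page address per
 * slot from the engine's refill buffer, building a hardware-walked
 * singly linked list:
 *
 *   refill_pa -> [ pat | data[] ] -> [ pat | data[] ] -> ... -> 0
 *
 * dmm_txn_commit() below terminates the chain (next_pa = 0) and writes
 * refill_pa to PAT_DESCR so the engine can walk it.
 */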

/**
 * Commit the DMM transaction.
 */
static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
{
	int ret = 0;
	struct refill_engine *engine = txn->engine_handle;
	struct dmm *dmm = engine->dmm;

	if (!txn->last_pat) {
		dev_err(engine->dmm->dev, "need at least one txn\n");
		ret = -EINVAL;
		goto cleanup;
	}

	txn->last_pat->next_pa = 0;

	/* write to PAT_DESCR to clear out any pending transaction */
	dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);

	/* wait for engine ready: */
	ret = wait_status(engine, DMM_PATSTATUS_READY);
	if (ret) {
		ret = -EFAULT;
		goto cleanup;
	}

	/* mark whether it is async to denote list management in IRQ handler */
	engine->async = !wait;
	reinit_completion(&engine->compl);
	/* verify that the irq handler sees the 'async' and completion value */
	smp_mb();

	/* kick reload */
	dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);

	if (wait) {
		if (!wait_for_completion_timeout(&engine->compl,
				msecs_to_jiffies(100))) {
			dev_err(dmm->dev, "timed out waiting for done\n");
			ret = -ETIMEDOUT;
			goto cleanup;
		}

		/* check the engine status before continuing */
		ret = wait_status(engine, DMM_PATSTATUS_READY |
				  DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE);
	}

cleanup:
	/* only place engine back on list if we are done with it */
	if (ret || wait)
		release_engine(engine);

	return ret;
}
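
/*
 * Engine ownership after commit: a synchronous commit (or any error path)
 * releases the engine right here, while an asynchronous commit leaves the
 * release to the IRQ handler when the LST bit fires. This is why
 * engine->async must be visible to the handler before the reload is
 * kicked, which the smp_mb() above guarantees.
 */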

/*
 * DMM programming
 */
static int fill(struct tcm_area *area, struct page **pages,
		u32 npages, u32 roll, bool wait)
{
	int ret = 0;
	struct tcm_area slice, area_s;
	struct dmm_txn *txn;

	/*
	 * FIXME
	 *
	 * Asynchronous fill does not work reliably, as the driver does not
	 * handle errors in the async code paths. The fill operation may
	 * silently fail, leading to leaking DMM engines, which may eventually
	 * lead to deadlock if we run out of DMM engines.
	 *
	 * For now, always set 'wait' so that we only use sync fills. Async
	 * fills should be fixed, or alternatively we could decide to only
	 * support sync fills and so the whole async code path could be removed.
	 */

	wait = true;

	txn = dmm_txn_init(omap_dmm, area->tcm);
	if (IS_ERR_OR_NULL(txn))
		return -ENOMEM;

	tcm_for_each_slice(slice, *area, area_s) {
		struct pat_area p_area = {
				.x0 = slice.p0.x,  .y0 = slice.p0.y,
				.x1 = slice.p1.x,  .y1 = slice.p1.y,
		};

		dmm_txn_append(txn, &p_area, pages, npages, roll);

		roll += tcm_sizeof(slice);
	}

	ret = dmm_txn_commit(txn, wait);

	return ret;
}

/*
 * Pin/unpin
 */

/* note: slots for which pages[i] == NULL are filled w/ dummy page */
int tiler_pin(struct tiler_block *block, struct page **pages,
		u32 npages, u32 roll, bool wait)
{
	int ret;

	ret = fill(&block->area, pages, npages, roll, wait);

	if (ret)
		tiler_unpin(block);

	return ret;
}

int tiler_unpin(struct tiler_block *block)
{
	return fill(&block->area, NULL, 0, 0, false);
}
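
/*
 * Typical caller life cycle (a sketch; see omap_gem.c for the in-tree
 * user):
 *
 *   block = tiler_reserve_2d(TILFMT_16BIT, w, h, 0);
 *   ret = tiler_pin(block, pages, npages, 0, true);
 *   ... access the buffer through tiler_ssptr(block) ...
 *   tiler_unpin(block);
 *   tiler_release(block);
 */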

/*
 * Reserve/release
 */
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w,
		u16 h, u16 align)
{
	struct tiler_block *block;
	u32 min_align = 128;
	int ret;
	unsigned long flags;
	u32 slot_bytes;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return ERR_PTR(-ENOMEM);

	BUG_ON(!validfmt(fmt));

	/* convert width/height to slots */
	w = DIV_ROUND_UP(w, geom[fmt].slot_w);
	h = DIV_ROUND_UP(h, geom[fmt].slot_h);

	/* convert alignment to slots */
	slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
	min_align = max(min_align, slot_bytes);
	align = (align > min_align) ? ALIGN(align, min_align) : min_align;
	align /= slot_bytes;

	block->fmt = fmt;

	ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
			&block->area);
	if (ret) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	/* add to allocation list */
	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}
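
/*
 * Worked example for the alignment math above, assuming the usual slot
 * geometry (SLOT_WIDTH_BITS == 6): for TILFMT_32BIT, slot_w is 32 and
 * cpp is 4, so slot_bytes = 128 and min_align stays at 128. A requested
 * align of 0 is bumped to 128 bytes, i.e. 1 slot; a requested align of
 * 256 stays at 256 bytes, i.e. 2 slots passed to tcm_reserve_2d().
 */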

struct tiler_block *tiler_reserve_1d(size_t size)
{
	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long flags;

	if (!block)
		return ERR_PTR(-ENOMEM);

	block->fmt = TILFMT_PAGE;

	if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
				&block->area)) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}

/* note: if you have pinned pages, you should have unpinned them first! */
int tiler_release(struct tiler_block *block)
{
	int ret = tcm_free(&block->area);
	unsigned long flags;

	if (block->area.tcm)
		dev_err(omap_dmm->dev, "failed to release block\n");

	spin_lock_irqsave(&list_lock, flags);
	list_del(&block->alloc_node);
	spin_unlock_irqrestore(&list_lock, flags);

	kfree(block);
	return ret;
}

/*
 * Utils
 */

/* calculate the tiler space address of a pixel in a view orientation...
 * below description copied from the display subsystem section of TRM:
 *
 * When the TILER is addressed, the bits:
 *   [28:27] = 0x0 for 8-bit tiled
 *             0x1 for 16-bit tiled
 *             0x2 for 32-bit tiled
 *             0x3 for page mode
 *   [31:29] = 0x0 for 0-degree view
 *             0x1 for 180-degree view + mirroring
 *             0x2 for 0-degree view + mirroring
 *             0x3 for 180-degree view
 *             0x4 for 270-degree view + mirroring
 *             0x5 for 270-degree view
 *             0x6 for 90-degree view
 *             0x7 for 90-degree view + mirroring
 * Otherwise the bits indicate the corresponding bit address to access
 * the SDRAM.
 */
static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
{
	u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;

	x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
	y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
	alignment = geom[fmt].x_shft + geom[fmt].y_shft;

	/* validate coordinate */
	x_mask = MASK(x_bits);
	y_mask = MASK(y_bits);

	if (x > x_mask || y > y_mask) {
		DBG("invalid coords: %u > %u || %u > %u",
				x, x_mask, y, y_mask);
		return 0;
	}

	/* account for mirroring */
	if (orient & MASK_X_INVERT)
		x ^= x_mask;
	if (orient & MASK_Y_INVERT)
		y ^= y_mask;

	/* get coordinate address */
	if (orient & MASK_XY_FLIP)
		tmp = ((x << y_bits) + y);
	else
		tmp = ((y << x_bits) + x);

	return TIL_ADDR((tmp << alignment), orient, fmt);
}
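
/*
 * Worked example, assuming CONT_WIDTH_BITS == 14 and CONT_HEIGHT_BITS == 13
 * (the values in omap_dmm_priv.h): for an 8-bit view (x_shft == y_shft == 0)
 * with no rotation or mirroring, the pixel at (x, y) lands at container
 * offset (y << 14) + x, and TIL_ADDR() then folds in the orientation and
 * format bits [31:27] described above.
 */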

dma_addr_t tiler_ssptr(struct tiler_block *block)
{
	BUG_ON(!validfmt(block->fmt));

	return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
			block->area.p0.x * geom[block->fmt].slot_w,
			block->area.p0.y * geom[block->fmt].slot_h);
}

dma_addr_t tiler_tsptr(struct tiler_block *block, u32 orient,
		u32 x, u32 y)
{
	struct tcm_pt *p = &block->area.p0;

	BUG_ON(!validfmt(block->fmt));

	return tiler_get_address(block->fmt, orient,
			(p->x * geom[block->fmt].slot_w) + x,
			(p->y * geom[block->fmt].slot_h) + y);
}

void tiler_align(enum tiler_fmt fmt, u16 *w, u16 *h)
{
	BUG_ON(!validfmt(fmt));
	*w = round_up(*w, geom[fmt].slot_w);
	*h = round_up(*h, geom[fmt].slot_h);
}

u32 tiler_stride(enum tiler_fmt fmt, u32 orient)
{
	BUG_ON(!validfmt(fmt));

	if (orient & MASK_XY_FLIP)
		return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
	else
		return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
}
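
/*
 * Example, again assuming CONT_WIDTH_BITS == 14: a non-flipped 8-bit view
 * has a fixed container stride of 1 << 14 = 16384 bytes regardless of the
 * width actually reserved; XY-flipped views walk the container vertically
 * instead, hence the CONT_HEIGHT_BITS variant.
 */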

size_t tiler_size(enum tiler_fmt fmt, u16 w, u16 h)
{
	tiler_align(fmt, &w, &h);
	return geom[fmt].cpp * w * h;
}

size_t tiler_vsize(enum tiler_fmt fmt, u16 w, u16 h)
{
	BUG_ON(!validfmt(fmt));
	return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}

u32 tiler_get_cpu_cache_flags(void)
{
	return omap_dmm->plat_data->cpu_cache_flags;
}

bool dmm_is_available(void)
{
	return omap_dmm != NULL;
}

static int omap_dmm_remove(struct platform_device *dev)
{
	struct tiler_block *block, *_block;
	int i;
	unsigned long flags;

	if (omap_dmm) {
		/* free all area regions */
		spin_lock_irqsave(&list_lock, flags);
		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
					alloc_node) {
			list_del(&block->alloc_node);
			kfree(block);
		}
		spin_unlock_irqrestore(&list_lock, flags);

		for (i = 0; i < omap_dmm->num_lut; i++)
			if (omap_dmm->tcm && omap_dmm->tcm[i])
				omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
		kfree(omap_dmm->tcm);

		kfree(omap_dmm->engines);
		if (omap_dmm->refill_va)
			dma_free_wc(omap_dmm->dev,
				    REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				    omap_dmm->refill_va, omap_dmm->refill_pa);
		if (omap_dmm->dummy_page)
			__free_page(omap_dmm->dummy_page);

		if (omap_dmm->irq > 0)
			free_irq(omap_dmm->irq, omap_dmm);

		iounmap(omap_dmm->base);
		kfree(omap_dmm);
		omap_dmm = NULL;
	}

	return 0;
}

static int omap_dmm_probe(struct platform_device *dev)
{
	int ret = -EFAULT, i;
	struct tcm_area area = {0};
	u32 hwinfo, pat_geom;
	struct resource *mem;

	omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
	if (!omap_dmm)
		goto fail;

	/* initialize lists */
	INIT_LIST_HEAD(&omap_dmm->alloc_head);
	INIT_LIST_HEAD(&omap_dmm->idle_head);

	init_waitqueue_head(&omap_dmm->engine_queue);

	if (dev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(dmm_of_match, dev->dev.of_node);
		if (!match) {
			dev_err(&dev->dev, "failed to find matching device node\n");
			ret = -ENODEV;
			goto fail;
		}

		omap_dmm->plat_data = match->data;
	}

	/* lookup hwmod data - base address and irq */
	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&dev->dev, "failed to get base address resource\n");
		goto fail;
	}

	omap_dmm->base = ioremap(mem->start, SZ_2K);
	if (!omap_dmm->base) {
		dev_err(&dev->dev, "failed to get dmm base address\n");
		goto fail;
	}

	omap_dmm->irq = platform_get_irq(dev, 0);
	if (omap_dmm->irq < 0) {
		dev_err(&dev->dev, "failed to get IRQ resource\n");
		goto fail;
	}

	omap_dmm->dev = &dev->dev;

	hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
	omap_dmm->container_width = 256;
	omap_dmm->container_height = 128;

	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);

	/* read out actual LUT width and height */
	pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
	omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;

	/*
	 * Increment the LUT count by one on OMAP5: there the LUT has twice
	 * the height, and is split into a separate container.
	 */
	if (omap_dmm->lut_height != omap_dmm->container_height)
		omap_dmm->num_lut++;

	/* initialize DMM registers */
	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
	dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
	dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);

	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
				"omap_dmm_irq_handler", omap_dmm);
	if (ret) {
		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
			omap_dmm->irq, ret);
		omap_dmm->irq = -1;
		goto fail;
	}

	/*
	 * Enable all interrupts for each refill engine except
	 * ERR_LUT_MISS<n> (which is just advisory, and we don't care
	 * about because we want to be able to refill live scanout
	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
	 * we just generally don't care about.
	 */
	dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);

	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!omap_dmm->dummy_page) {
		dev_err(&dev->dev, "could not allocate dummy page\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* set dma mask for device */
	ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto fail;

	omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);

	/* alloc refill memory */
	omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
					   REFILL_BUFFER_SIZE * omap_dmm->num_engines,
					   &omap_dmm->refill_pa, GFP_KERNEL);
	if (!omap_dmm->refill_va) {
		dev_err(&dev->dev, "could not allocate refill memory\n");
		goto fail;
	}

	/* alloc engines */
	omap_dmm->engines = kcalloc(omap_dmm->num_engines,
				    sizeof(*omap_dmm->engines), GFP_KERNEL);
	if (!omap_dmm->engines) {
		ret = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < omap_dmm->num_engines; i++) {
		omap_dmm->engines[i].id = i;
		omap_dmm->engines[i].dmm = omap_dmm;
		omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
						(REFILL_BUFFER_SIZE * i);
		omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
						(REFILL_BUFFER_SIZE * i);
		init_completion(&omap_dmm->engines[i].compl);

		list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
	}

	omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
				GFP_KERNEL);
	if (!omap_dmm->tcm) {
		ret = -ENOMEM;
		goto fail;
	}

	/*
	 * Init containers: each LUT is associated with a TCM (container
	 * manager). The lut_id identifies the correct LUT for programming
	 * during refill operations.
	 */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
						omap_dmm->container_height);

		if (!omap_dmm->tcm[i]) {
			dev_err(&dev->dev, "failed to allocate container\n");
			ret = -ENOMEM;
			goto fail;
		}

		omap_dmm->tcm[i]->lut_id = i;
	}

	/*
	 * Assign access mode containers to the applicable tcm container:
	 * OMAP4 has 1 container for all 4 views; OMAP5 has 2 containers,
	 * 1 for 2D and 1 for 1D.
	 */
	containers[TILFMT_8BIT] = omap_dmm->tcm[0];
	containers[TILFMT_16BIT] = omap_dmm->tcm[0];
	containers[TILFMT_32BIT] = omap_dmm->tcm[0];

	if (omap_dmm->container_height != omap_dmm->lut_height) {
		/*
		 * The second LUT is used for PAGE mode. Programming must use
		 * a y offset that is added to all y coordinates. The LUT id
		 * is still 0, because it is the same LUT, just the upper 128
		 * lines.
		 */
		containers[TILFMT_PAGE] = omap_dmm->tcm[1];
		omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
		omap_dmm->tcm[1]->lut_id = 0;
	} else {
		containers[TILFMT_PAGE] = omap_dmm->tcm[0];
	}

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(omap_dmm->dev, "refill failed\n");
	}

	dev_info(omap_dmm->dev, "initialized all PAT entries\n");

	return 0;

fail:
	if (omap_dmm_remove(dev))
		dev_err(&dev->dev, "cleanup failed\n");
	return ret;
}

/*
 * debugfs support
 */

#ifdef CONFIG_DEBUG_FS

static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
				"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
static const char *special = ".,:;'\"`~!^-+";

static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
							char c, bool ovw)
{
	int x, y;

	for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
		for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
			if (map[y][x] == ' ' || ovw)
				map[y][x] = c;
}

static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
									char c)
{
	map[p->y / ydiv][p->x / xdiv] = c;
}

static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
{
	return map[p->y / ydiv][p->x / xdiv];
}

static int map_width(int xdiv, int x0, int x1)
{
	return (x1 / xdiv) - (x0 / xdiv) + 1;
}

static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
{
	char *p = map[yd] + (x0 / xdiv);
	int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;

	if (w >= 0) {
		p += w;
		while (*nice)
			*p++ = *nice++;
	}
}

static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
							struct tcm_area *a)
{
	sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
	if (a->p0.y + 1 < a->p1.y) {
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
							256 - 1);
	} else if (a->p0.y < a->p1.y) {
		if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
			text_map(map, xdiv, nice, a->p0.y / ydiv,
					a->p0.x + xdiv, 256 - 1);
		else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
			text_map(map, xdiv, nice, a->p1.y / ydiv,
					0, a->p1.x - xdiv);
	} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
		text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
	}
}

static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
							struct tcm_area *a)
{
	sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
	if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
							a->p0.x, a->p1.x);
}

int tiler_map_show(struct seq_file *s, void *arg)
{
	int xdiv = 2, ydiv = 1;
	char **map = NULL, *global_map;
	struct tiler_block *block;
	struct tcm_area a, p;
	int i;
	const char *m2d = alphabet;
	const char *a2d = special;
	const char *m2dp = m2d, *a2dp = a2d;
	char nice[128];
	int h_adj;
	int w_adj;
	unsigned long flags;
	int lut_idx;

	if (!omap_dmm) {
		/* early return if dmm/tiler device is not initialized */
		return 0;
	}

	h_adj = omap_dmm->container_height / ydiv;
	w_adj = omap_dmm->container_width / xdiv;

	map = kmalloc_array(h_adj, sizeof(*map), GFP_KERNEL);
	global_map = kmalloc_array(w_adj + 1, h_adj, GFP_KERNEL);

	if (!map || !global_map)
		goto error;

	for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
		memset(map, 0, h_adj * sizeof(*map));
		memset(global_map, ' ', (w_adj + 1) * h_adj);

		for (i = 0; i < omap_dmm->container_height; i++) {
			map[i] = global_map + i * (w_adj + 1);
			map[i][w_adj] = 0;
		}

		spin_lock_irqsave(&list_lock, flags);

		list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
			if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
				if (block->fmt != TILFMT_PAGE) {
					fill_map(map, xdiv, ydiv, &block->area,
						*m2dp, true);
					if (!*++a2dp)
						a2dp = a2d;
					if (!*++m2dp)
						m2dp = m2d;
					map_2d_info(map, xdiv, ydiv, nice,
							&block->area);
				} else {
					bool start = read_map_pt(map, xdiv,
						ydiv, &block->area.p0) == ' ';
					bool end = read_map_pt(map, xdiv, ydiv,
							&block->area.p1) == ' ';

					tcm_for_each_slice(a, block->area, p)
						fill_map(map, xdiv, ydiv, &a,
							'=', true);
					fill_map_pt(map, xdiv, ydiv,
							&block->area.p0,
							start ? '<' : 'X');
					fill_map_pt(map, xdiv, ydiv,
							&block->area.p1,
							end ? '>' : 'X');
					map_1d_info(map, xdiv, ydiv, nice,
							&block->area);
				}
			}
		}

		spin_unlock_irqrestore(&list_lock, flags);

		if (s) {
			seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
			for (i = 0; i < 128; i++)
				seq_printf(s, "%03d:%s\n", i, map[i]);
			seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
		} else {
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
				lut_idx);
			for (i = 0; i < 128; i++)
				dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
				lut_idx);
		}
	}

error:
	kfree(map);
	kfree(global_map);

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int omap_dmm_resume(struct device *dev)
{
	struct tcm_area area;
	int i;

	if (!omap_dmm)
		return -ENODEV;

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(dev, "refill failed\n");
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume);

#if defined(CONFIG_OF)
static const struct dmm_platform_data dmm_omap4_platform_data = {
	.cpu_cache_flags = OMAP_BO_WC,
};

static const struct dmm_platform_data dmm_omap5_platform_data = {
	.cpu_cache_flags = OMAP_BO_UNCACHED,
};

static const struct of_device_id dmm_of_match[] = {
	{
		.compatible = "ti,omap4-dmm",
		.data = &dmm_omap4_platform_data,
	},
	{
		.compatible = "ti,omap5-dmm",
		.data = &dmm_omap5_platform_data,
	},
	{},
};
#endif

struct platform_driver omap_dmm_driver = {
	.probe = omap_dmm_probe,
	.remove = omap_dmm_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = DMM_DRIVER_NAME,
		.of_match_table = of_match_ptr(dmm_of_match),
		.pm = &omap_dmm_pm_ops,
	},
};

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");