/*
 *  pxa3xx-gcu.c - Linux kernel module for PXA3xx graphics controllers
 *
 *  This driver needs a DirectFB counterpart in user space; communication
 *  is handled via mmap()ed memory areas and an ioctl.
 *
 *  Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
 *  Copyright (c) 2009 Janine Kropp <nin@directfb.org>
 *  Copyright (c) 2009 Denis Oliver Kropp <dok@directfb.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * WARNING: This controller is attached to System Bus 2 of the PXA which
 * needs its arbiter to be enabled explicitly (CKENB & 1<<9).
 * There is currently no way to do this from Linux, so you have to set
 * this up in your bootloader for now.
 */
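
/*
 * A minimal bootloader-side sketch of that arbiter enable (illustrative
 * only; the CKENB register address and the readl/writel-style accessors
 * are assumptions about the boot environment, not part of this driver):
 *
 *	u32 ckenb = readl(CKENB);
 *	writel(ckenb | (1 << 9), CKENB);	// enable the System Bus 2 arbiter
 */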

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ioctl.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/of.h>

#include "pxa3xx-gcu.h"

#define DRV_NAME	"pxa3xx-gcu"
#define MISCDEV_MINOR	197

#define REG_GCCR	0x00
#define GCCR_SYNC_CLR	(1 << 9)
#define GCCR_BP_RST	(1 << 8)
#define GCCR_ABORT	(1 << 6)
#define GCCR_STOP	(1 << 4)

#define REG_GCISCR	0x04
#define REG_GCIECR	0x08
#define REG_GCRBBR	0x20
#define REG_GCRBLR	0x24
#define REG_GCRBHR	0x28
#define REG_GCRBTR	0x2C
#define REG_GCRBEXHR	0x30

#define IE_EOB		(1 << 0)
#define IE_EEOB		(1 << 5)
#define IE_ALL		0xff
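
/*
 * Register naming above follows the conventions used throughout this driver:
 * GCCR is the control register, GCISCR/GCIECR the interrupt status/enable
 * registers, and the GCRB* group describes the command ring buffer (base,
 * length, head, tail and execution head).  IE_EOB/IE_EEOB are the
 * "end of buffer" / "end of execution of buffer" interrupt bits.  The exact
 * semantics are inferred from how the registers are programmed below rather
 * than restated from a datasheet.
 */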

#define SHARED_SIZE	PAGE_ALIGN(sizeof(struct pxa3xx_gcu_shared))

/* #define PXA3XX_GCU_DEBUG */
/* #define PXA3XX_GCU_DEBUG_TIMER */

#ifdef PXA3XX_GCU_DEBUG
#define QDUMP(msg)					\
	do {						\
		QPRINT(priv, KERN_DEBUG, msg);		\
	} while (0)
#else
#define QDUMP(msg)	do {} while (0)
#endif

#define QERROR(msg)					\
	do {						\
		QPRINT(priv, KERN_ERR, msg);		\
	} while (0)

struct pxa3xx_gcu_batch {
	struct pxa3xx_gcu_batch *next;
	u32			*ptr;
	dma_addr_t		 phys;
	unsigned long		 length;
};

struct pxa3xx_gcu_priv {
	struct device		 *dev;
	void __iomem		 *mmio_base;
	struct clk		 *clk;
	struct pxa3xx_gcu_shared *shared;
	dma_addr_t		  shared_phys;
	struct resource		 *resource_mem;
	struct miscdevice	  misc_dev;
	wait_queue_head_t	  wait_idle;
	wait_queue_head_t	  wait_free;
	spinlock_t		  spinlock;
	struct timespec64	  base_time;

	struct pxa3xx_gcu_batch *free;
	struct pxa3xx_gcu_batch *ready;
	struct pxa3xx_gcu_batch *ready_last;
	struct pxa3xx_gcu_batch *running;
};
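
/*
 * Batch buffers cycle through three singly-linked lists in struct
 * pxa3xx_gcu_priv: userspace write()s fill a buffer taken from "free",
 * the buffer is queued on "ready" (with "ready_last" as tail pointer),
 * run_ready() hands all ready buffers to the hardware and moves them to
 * "running", and the EEOB interrupt finally returns them to "free".
 */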

static inline unsigned long
gc_readl(struct pxa3xx_gcu_priv *priv, unsigned int off)
{
	return __raw_readl(priv->mmio_base + off);
}

static inline void
gc_writel(struct pxa3xx_gcu_priv *priv, unsigned int off, unsigned long val)
{
	__raw_writel(val, priv->mmio_base + off);
}

#define QPRINT(priv, level, msg)					\
	do {								\
		struct timespec64 ts;					\
		struct pxa3xx_gcu_shared *shared = priv->shared;	\
		u32 base = gc_readl(priv, REG_GCRBBR);			\
									\
		ktime_get_ts64(&ts);					\
		ts = timespec64_sub(ts, priv->base_time);		\
									\
		printk(level "%lld.%03ld.%03ld - %-17s: %-21s (%s, "	\
			"STATUS "					\
			"0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, "	\
			"T %5ld)\n",					\
			(s64)(ts.tv_sec),				\
			ts.tv_nsec / NSEC_PER_MSEC,			\
			(ts.tv_nsec % NSEC_PER_MSEC) / USEC_PER_MSEC,	\
			__func__, msg,					\
			shared->hw_running ? "running" : "   idle",	\
			gc_readl(priv, REG_GCISCR),			\
			gc_readl(priv, REG_GCRBBR),			\
			gc_readl(priv, REG_GCRBLR),			\
			(gc_readl(priv, REG_GCRBEXHR) - base) / 4,	\
			(gc_readl(priv, REG_GCRBHR) - base) / 4,	\
			(gc_readl(priv, REG_GCRBTR) - base) / 4);	\
	} while (0)

static void
pxa3xx_gcu_reset(struct pxa3xx_gcu_priv *priv)
{
	QDUMP("RESET");

	/* disable interrupts */
	gc_writel(priv, REG_GCIECR, 0);

	/* reset hardware */
	gc_writel(priv, REG_GCCR, GCCR_ABORT);
	gc_writel(priv, REG_GCCR, 0);

	memset(priv->shared, 0, SHARED_SIZE);
	priv->shared->buffer_phys = priv->shared_phys;
	priv->shared->magic = PXA3XX_GCU_SHARED_MAGIC;

	ktime_get_ts64(&priv->base_time);

	/* set up the ring buffer pointers */
	gc_writel(priv, REG_GCRBLR, 0);
	gc_writel(priv, REG_GCRBBR, priv->shared_phys);
	gc_writel(priv, REG_GCRBTR, priv->shared_phys);

	/* enable all IRQs except EOB */
	gc_writel(priv, REG_GCIECR, IE_ALL & ~IE_EOB);
}

static void
dump_whole_state(struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_shared *sh = priv->shared;
	u32 base = gc_readl(priv, REG_GCRBBR);

	QDUMP("DUMP");

	printk(KERN_DEBUG "== PXA3XX-GCU DUMP ==\n"
		"%s, STATUS 0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, T %5ld\n",
		sh->hw_running ? "running" : "idle   ",
		gc_readl(priv, REG_GCISCR),
		gc_readl(priv, REG_GCRBBR),
		gc_readl(priv, REG_GCRBLR),
		(gc_readl(priv, REG_GCRBEXHR) - base) / 4,
		(gc_readl(priv, REG_GCRBHR) - base) / 4,
		(gc_readl(priv, REG_GCRBTR) - base) / 4);
}

static void
flush_running(struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *running = priv->running;
	struct pxa3xx_gcu_batch *next;

	while (running) {
		next = running->next;
		running->next = priv->free;
		priv->free = running;
		running = next;
	}

	priv->running = NULL;
}

static void
run_ready(struct pxa3xx_gcu_priv *priv)
{
	unsigned int num = 0;
	struct pxa3xx_gcu_shared *shared = priv->shared;
	struct pxa3xx_gcu_batch	*ready = priv->ready;

	QDUMP("Start");

	BUG_ON(!ready);

	shared->buffer[num++] = 0x05000000;

	while (ready) {
		shared->buffer[num++] = 0x00000001;
		shared->buffer[num++] = ready->phys;
		ready = ready->next;
	}

	shared->buffer[num++] = 0x05000000;
	priv->running = priv->ready;
	priv->ready = priv->ready_last = NULL;
	gc_writel(priv, REG_GCRBLR, 0);
	shared->hw_running = 1;

	/* ring base address */
	gc_writel(priv, REG_GCRBBR, shared->buffer_phys);

	/* ring tail address */
	gc_writel(priv, REG_GCRBTR, shared->buffer_phys + num * 4);

	/* ring length */
	gc_writel(priv, REG_GCRBLR, ((num + 63) & ~63) * 4);
}
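
/*
 * run_ready() builds a small program in the shared ring buffer: for every
 * queued batch it emits a 0x00000001 word followed by the batch's DMA
 * address, with a 0x05000000 word before and after the list.  The opcode
 * values are carried over from the original driver and are not documented
 * here.  The ring length register is written last (rounded up to a multiple
 * of 64 words); once the hardware has worked through the ring it signals
 * EEOB and pxa3xx_gcu_handle_irq() recycles the batches.
 */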

static irqreturn_t
pxa3xx_gcu_handle_irq(int irq, void *ctx)
{
	struct pxa3xx_gcu_priv *priv = ctx;
	struct pxa3xx_gcu_shared *shared = priv->shared;
	u32 status = gc_readl(priv, REG_GCISCR) & IE_ALL;

	QDUMP("-Interrupt");

	if (!status)
		return IRQ_NONE;

	spin_lock(&priv->spinlock);
	shared->num_interrupts++;

	if (status & IE_EEOB) {
		QDUMP(" [EEOB]");

		flush_running(priv);
		wake_up_all(&priv->wait_free);

		if (priv->ready) {
			run_ready(priv);
		} else {
			/* There is no more data prepared by userspace.
			 * Set hw_running = 0 and wait for the next
			 * userspace kick-off. */
			shared->num_idle++;
			shared->hw_running = 0;

			QDUMP(" '-> Idle.");

			/* set ring buffer length to zero */
			gc_writel(priv, REG_GCRBLR, 0);

			wake_up_all(&priv->wait_idle);
		}

		shared->num_done++;
	} else {
		QERROR(" [???]");
		dump_whole_state(priv);
	}

	/* Clear the interrupt */
	gc_writel(priv, REG_GCISCR, status);
	spin_unlock(&priv->spinlock);

	return IRQ_HANDLED;
}

static int
pxa3xx_gcu_wait_idle(struct pxa3xx_gcu_priv *priv)
{
	int ret = 0;

	QDUMP("Waiting for idle...");

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_wait_idle++;

	while (priv->shared->hw_running) {
		int num = priv->shared->num_interrupts;
		u32 rbexhr = gc_readl(priv, REG_GCRBEXHR);

		ret = wait_event_interruptible_timeout(priv->wait_idle,
					!priv->shared->hw_running, HZ*4);

		if (ret != 0)
			break;

		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr &&
		    priv->shared->num_interrupts == num) {
			QERROR("TIMEOUT");
			ret = -ETIMEDOUT;
			break;
		}
	}

	QDUMP("done");

	return ret;
}
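
/*
 * The 4-second timeout above is deliberately soft: after a wakeup by timeout
 * the code re-reads GCRBEXHR and the interrupt counter and only reports
 * -ETIMEDOUT if neither has moved, i.e. the engine is genuinely stuck rather
 * than just busy with a long batch.  pxa3xx_gcu_wait_free() below uses a
 * similar check for the free-buffer list.
 */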

static int
pxa3xx_gcu_wait_free(struct pxa3xx_gcu_priv *priv)
{
	int ret = 0;

	QDUMP("Waiting for free...");

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_wait_free++;

	while (!priv->free) {
		u32 rbexhr = gc_readl(priv, REG_GCRBEXHR);

		ret = wait_event_interruptible_timeout(priv->wait_free,
						       priv->free, HZ*4);

		if (ret < 0)
			break;

		if (ret > 0)
			continue;

		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr) {
			QERROR("TIMEOUT");
			ret = -ETIMEDOUT;
			break;
		}
	}

	QDUMP("done");

	return ret;
}

/* Misc device layer */

static inline struct pxa3xx_gcu_priv *to_pxa3xx_gcu_priv(struct file *file)
{
	struct miscdevice *dev = file->private_data;
	return container_of(dev, struct pxa3xx_gcu_priv, misc_dev);
}

/*
 * provide an empty .open callback, so the core sets file->private_data
 * for us.
 */
static int pxa3xx_gcu_open(struct inode *inode, struct file *file)
{
	return 0;
}

static ssize_t
pxa3xx_gcu_write(struct file *file, const char __user *buff,
		 size_t count, loff_t *offp)
{
	int ret;
	unsigned long flags;
	struct pxa3xx_gcu_batch	*buffer;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	int words = count / 4;

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_writes++;
	priv->shared->num_words += words;

	/* Last word reserved for batch buffer end command */
	if (words >= PXA3XX_GCU_BATCH_WORDS)
		return -E2BIG;

	/* Wait for a free buffer */
	if (!priv->free) {
		ret = pxa3xx_gcu_wait_free(priv);
		if (ret < 0)
			return ret;
	}

	/*
	 * Get buffer from free list
	 */
	spin_lock_irqsave(&priv->spinlock, flags);
	buffer = priv->free;
	priv->free = buffer->next;
	spin_unlock_irqrestore(&priv->spinlock, flags);


	/* Copy data from user into buffer */
	ret = copy_from_user(buffer->ptr, buff, words * 4);
	if (ret) {
		spin_lock_irqsave(&priv->spinlock, flags);
		buffer->next = priv->free;
		priv->free = buffer;
		spin_unlock_irqrestore(&priv->spinlock, flags);
		return -EFAULT;
	}

	buffer->length = words;

	/* Append batch buffer end command */
	buffer->ptr[words] = 0x01000000;

	/*
	 * Add buffer to ready list
	 */
	spin_lock_irqsave(&priv->spinlock, flags);

	buffer->next = NULL;

	if (priv->ready) {
		BUG_ON(priv->ready_last == NULL);

		priv->ready_last->next = buffer;
	} else
		priv->ready = buffer;

	priv->ready_last = buffer;

	if (!priv->shared->hw_running)
		run_ready(priv);

	spin_unlock_irqrestore(&priv->spinlock, flags);

	return words * 4;
}
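
/*
 * Each write() submits one self-contained batch: the payload is truncated to
 * a whole number of 32-bit words and may be at most PXA3XX_GCU_BATCH_WORDS - 1
 * words long (the driver appends the 0x01000000 end-of-batch marker itself).
 * The call returns the number of payload bytes queued; the hardware may still
 * be executing the batch when write() returns, so callers that need completion
 * have to use the WAIT_IDLE ioctl.
 */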


static long
pxa3xx_gcu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	unsigned long flags;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	switch (cmd) {
	case PXA3XX_GCU_IOCTL_RESET:
		spin_lock_irqsave(&priv->spinlock, flags);
		pxa3xx_gcu_reset(priv);
		spin_unlock_irqrestore(&priv->spinlock, flags);
		return 0;

	case PXA3XX_GCU_IOCTL_WAIT_IDLE:
		return pxa3xx_gcu_wait_idle(priv);
	}

	return -ENOSYS;
}

static int
pxa3xx_gcu_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int size = vma->vm_end - vma->vm_start;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	switch (vma->vm_pgoff) {
	case 0:
		/* hand out the shared data area */
		if (size != SHARED_SIZE)
			return -EINVAL;

		return dma_mmap_coherent(priv->dev, vma,
			priv->shared, priv->shared_phys, size);

	case SHARED_SIZE >> PAGE_SHIFT:
		/* hand out the MMIO base for direct register access
		 * from userspace */
		if (size != resource_size(priv->resource_mem))
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		return io_remap_pfn_range(vma, vma->vm_start,
				priv->resource_mem->start >> PAGE_SHIFT,
				size, vma->vm_page_prot);
	}

	return -EINVAL;
}
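
/*
 * Userspace flow as assumed by this driver (sketch only; the DirectFB
 * counterpart is the real consumer and the variable names below are
 * illustrative, not part of any API):
 *
 *	int fd = open("/dev/pxa3xx-gcu", O_RDWR);
 *	shared = mmap(NULL, shared_size, PROT_READ | PROT_WRITE,
 *		      MAP_SHARED, fd, 0);			// shared area
 *	mmio   = mmap(NULL, mmio_size, PROT_READ | PROT_WRITE,
 *		      MAP_SHARED, fd, shared_size);		// registers
 *	write(fd, cmds, n * 4);					// queue a batch
 *	ioctl(fd, PXA3XX_GCU_IOCTL_WAIT_IDLE);			// wait for it
 */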


#ifdef PXA3XX_GCU_DEBUG_TIMER
static struct timer_list pxa3xx_gcu_debug_timer;
static struct pxa3xx_gcu_priv *debug_timer_priv;

static void pxa3xx_gcu_debug_timedout(struct timer_list *unused)
{
	struct pxa3xx_gcu_priv *priv = debug_timer_priv;

	QERROR("Timer DUMP");

	mod_timer(&pxa3xx_gcu_debug_timer, jiffies + 5 * HZ);
}

static void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv)
{
	/* init the timer structure */
	debug_timer_priv = priv;
	timer_setup(&pxa3xx_gcu_debug_timer, pxa3xx_gcu_debug_timedout, 0);
	pxa3xx_gcu_debug_timedout(NULL);
}
#else
static inline void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv) {}
#endif

static int
pxa3xx_gcu_add_buffer(struct device *dev,
		      struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *buffer;

	buffer = kzalloc(sizeof(struct pxa3xx_gcu_batch), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->ptr = dma_alloc_coherent(dev, PXA3XX_GCU_BATCH_WORDS * 4,
					 &buffer->phys, GFP_KERNEL);
	if (!buffer->ptr) {
		kfree(buffer);
		return -ENOMEM;
	}

	buffer->next = priv->free;
	priv->free = buffer;

	return 0;
}

static void
pxa3xx_gcu_free_buffers(struct device *dev,
			struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *next, *buffer = priv->free;

	while (buffer) {
		next = buffer->next;

		dma_free_coherent(dev, PXA3XX_GCU_BATCH_WORDS * 4,
				  buffer->ptr, buffer->phys);

		kfree(buffer);
		buffer = next;
	}

	priv->free = NULL;
}

static const struct file_operations pxa3xx_gcu_miscdev_fops = {
	.owner =		THIS_MODULE,
	.open =			pxa3xx_gcu_open,
	.write =		pxa3xx_gcu_write,
	.unlocked_ioctl =	pxa3xx_gcu_ioctl,
	.mmap =			pxa3xx_gcu_mmap,
};

static int pxa3xx_gcu_probe(struct platform_device *pdev)
{
	int i, ret, irq;
	struct resource *r;
	struct pxa3xx_gcu_priv *priv;
	struct device *dev = &pdev->dev;

	priv = devm_kzalloc(dev, sizeof(struct pxa3xx_gcu_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	init_waitqueue_head(&priv->wait_idle);
	init_waitqueue_head(&priv->wait_free);
	spin_lock_init(&priv->spinlock);
	priv->dev = dev;

	/* we allocate the misc device structure as part of our own allocation,
	 * so we can get a pointer to our priv structure later on with
	 * container_of(). This isn't really necessary as we have a fixed minor
	 * number anyway, but this is to avoid statics. */

	priv->misc_dev.minor	= MISCDEV_MINOR;
	priv->misc_dev.name	= DRV_NAME;
	priv->misc_dev.fops	= &pxa3xx_gcu_miscdev_fops;

	/* handle IO resources */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->mmio_base = devm_ioremap_resource(dev, r);
	if (IS_ERR(priv->mmio_base))
		return PTR_ERR(priv->mmio_base);

	/* enable the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	/* request the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no IRQ defined: %d\n", irq);
		return irq;
	}

	ret = devm_request_irq(dev, irq, pxa3xx_gcu_handle_irq,
			       0, DRV_NAME, priv);
	if (ret < 0) {
		dev_err(dev, "request_irq failed\n");
		return ret;
	}

	/* allocate dma memory */
	priv->shared = dma_alloc_coherent(dev, SHARED_SIZE,
					  &priv->shared_phys, GFP_KERNEL);
	if (!priv->shared) {
		dev_err(dev, "failed to allocate DMA memory\n");
		return -ENOMEM;
	}

	/* register misc device */
	ret = misc_register(&priv->misc_dev);
	if (ret < 0) {
		dev_err(dev, "misc_register() for minor %d failed\n",
			MISCDEV_MINOR);
		goto err_free_dma;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable clock\n");
		goto err_misc_deregister;
	}

	for (i = 0; i < 8; i++) {
		ret = pxa3xx_gcu_add_buffer(dev, priv);
		if (ret) {
			dev_err(dev, "failed to allocate DMA memory\n");
			goto err_disable_clk;
		}
	}

	platform_set_drvdata(pdev, priv);
	priv->resource_mem = r;
	pxa3xx_gcu_reset(priv);
	pxa3xx_gcu_init_debug_timer(priv);

	dev_info(dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n",
			(void *) r->start, (void *) priv->shared_phys,
			SHARED_SIZE, irq);
	return 0;

err_disable_clk:
	/* unwind in the reverse order of the steps above */
	pxa3xx_gcu_free_buffers(dev, priv);
	clk_disable_unprepare(priv->clk);

err_misc_deregister:
	misc_deregister(&priv->misc_dev);

err_free_dma:
	dma_free_coherent(dev, SHARED_SIZE,
			priv->shared, priv->shared_phys);

	return ret;
}

static int pxa3xx_gcu_remove(struct platform_device *pdev)
{
	struct pxa3xx_gcu_priv *priv = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	pxa3xx_gcu_wait_idle(priv);
	misc_deregister(&priv->misc_dev);
	dma_free_coherent(dev, SHARED_SIZE, priv->shared, priv->shared_phys);
	pxa3xx_gcu_free_buffers(dev, priv);
	clk_disable_unprepare(priv->clk);

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id pxa3xx_gcu_of_match[] = {
	{ .compatible = "marvell,pxa300-gcu", },
	{ }
};
MODULE_DEVICE_TABLE(of, pxa3xx_gcu_of_match);
#endif

static struct platform_driver pxa3xx_gcu_driver = {
	.probe	  = pxa3xx_gcu_probe,
	.remove	 = pxa3xx_gcu_remove,
	.driver	 = {
		.name   = DRV_NAME,
		.of_match_table = of_match_ptr(pxa3xx_gcu_of_match),
	},
};

module_platform_driver(pxa3xx_gcu_driver);

MODULE_DESCRIPTION("PXA3xx graphics controller unit driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(MISCDEV_MINOR);
MODULE_AUTHOR("Janine Kropp <nin@directfb.org>, "
		"Denis Oliver Kropp <dok@directfb.org>, "
		"Daniel Mack <daniel@caiaq.de>");