// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pxa3xx-gcu.c - Linux kernel module for PXA3xx graphics controllers
 *
 * This driver needs a DirectFB counterpart in user space; communication
 * is handled via mmap()ed memory areas and an ioctl.
 *
 * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
 * Copyright (c) 2009 Janine Kropp <nin@directfb.org>
 * Copyright (c) 2009 Denis Oliver Kropp <dok@directfb.org>
 */

/*
 * WARNING: This controller is attached to System Bus 2 of the PXA which
 * needs its arbiter to be enabled explicitly (CKENB & 1<<9).
 * There is currently no way to do this from Linux, so you need to teach
 * your bootloader to do it for now.
 */
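
/*
 * For illustration only (this has to happen in the bootloader; CKENB as
 * named in the original warning above and the PXA3xx developer manual):
 *
 *	CKENB |= (1 << 9);	to enable the System Bus 2 arbiter
 */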

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ioctl.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/of.h>

#include "pxa3xx-gcu.h"

#define DRV_NAME	"pxa3xx-gcu"

#define REG_GCCR	0x00
#define GCCR_SYNC_CLR	(1 << 9)
#define GCCR_BP_RST	(1 << 8)
#define GCCR_ABORT	(1 << 6)
#define GCCR_STOP	(1 << 4)

#define REG_GCISCR	0x04
#define REG_GCIECR	0x08
#define REG_GCRBBR	0x20
#define REG_GCRBLR	0x24
#define REG_GCRBHR	0x28
#define REG_GCRBTR	0x2C
#define REG_GCRBEXHR	0x30
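/*
 * The GCRB* registers form the ring buffer interface: base (BBR),
 * length (LR), head (HR), tail (TR) and execution head (EXHR), going by
 * how the driver programs them below.
 */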

#define IE_EOB		(1 << 0)
#define IE_EEOB		(1 << 5)
#define IE_ALL		0xff

#define SHARED_SIZE	PAGE_ALIGN(sizeof(struct pxa3xx_gcu_shared))

/* #define PXA3XX_GCU_DEBUG */
/* #define PXA3XX_GCU_DEBUG_TIMER */

#ifdef PXA3XX_GCU_DEBUG
#define QDUMP(msg) \
	do { \
		QPRINT(priv, KERN_DEBUG, msg); \
	} while (0)
#else
#define QDUMP(msg)	do {} while (0)
#endif

#define QERROR(msg) \
	do { \
		QPRINT(priv, KERN_ERR, msg); \
	} while (0)

struct pxa3xx_gcu_batch {
	struct pxa3xx_gcu_batch *next;
	u32 *ptr;
	dma_addr_t phys;
	unsigned long length;
};

struct pxa3xx_gcu_priv {
	struct device *dev;
	void __iomem *mmio_base;
	struct clk *clk;
	struct pxa3xx_gcu_shared *shared;
	dma_addr_t shared_phys;
	struct resource *resource_mem;
	struct miscdevice misc_dev;
	wait_queue_head_t wait_idle;
	wait_queue_head_t wait_free;
	spinlock_t spinlock;
	struct timespec64 base_time;

	struct pxa3xx_gcu_batch *free;
	struct pxa3xx_gcu_batch *ready;
	struct pxa3xx_gcu_batch *ready_last;
	struct pxa3xx_gcu_batch *running;
};

static inline unsigned long
gc_readl(struct pxa3xx_gcu_priv *priv, unsigned int off)
{
	return __raw_readl(priv->mmio_base + off);
}

static inline void
gc_writel(struct pxa3xx_gcu_priv *priv, unsigned int off, unsigned long val)
{
	__raw_writel(val, priv->mmio_base + off);
}

#define QPRINT(priv, level, msg) \
	do { \
		struct timespec64 ts; \
		struct pxa3xx_gcu_shared *shared = priv->shared; \
		u32 base = gc_readl(priv, REG_GCRBBR); \
		\
		ktime_get_ts64(&ts); \
		ts = timespec64_sub(ts, priv->base_time); \
		\
		printk(level "%lld.%03ld.%03ld - %-17s: %-21s (%s, " \
			"STATUS " \
			"0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, " \
			"T %5ld)\n", \
			(s64)(ts.tv_sec), \
			ts.tv_nsec / NSEC_PER_MSEC, \
			(ts.tv_nsec % NSEC_PER_MSEC) / USEC_PER_MSEC, \
			__func__, msg, \
			shared->hw_running ? "running" : " idle", \
			gc_readl(priv, REG_GCISCR), \
			gc_readl(priv, REG_GCRBBR), \
			gc_readl(priv, REG_GCRBLR), \
			(gc_readl(priv, REG_GCRBEXHR) - base) / 4, \
			(gc_readl(priv, REG_GCRBHR) - base) / 4, \
			(gc_readl(priv, REG_GCRBTR) - base) / 4); \
	} while (0)

static void
pxa3xx_gcu_reset(struct pxa3xx_gcu_priv *priv)
{
	QDUMP("RESET");

	/* disable interrupts */
	gc_writel(priv, REG_GCIECR, 0);

	/* reset hardware */
	gc_writel(priv, REG_GCCR, GCCR_ABORT);
	gc_writel(priv, REG_GCCR, 0);

	memset(priv->shared, 0, SHARED_SIZE);
	priv->shared->buffer_phys = priv->shared_phys;
	priv->shared->magic = PXA3XX_GCU_SHARED_MAGIC;

	ktime_get_ts64(&priv->base_time);

	/* set up the ring buffer pointers */
	gc_writel(priv, REG_GCRBLR, 0);
	gc_writel(priv, REG_GCRBBR, priv->shared_phys);
	gc_writel(priv, REG_GCRBTR, priv->shared_phys);

	/* enable all IRQs except EOB */
	gc_writel(priv, REG_GCIECR, IE_ALL & ~IE_EOB);
}

static void
dump_whole_state(struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_shared *sh = priv->shared;
	u32 base = gc_readl(priv, REG_GCRBBR);

	QDUMP("DUMP");

	printk(KERN_DEBUG "== PXA3XX-GCU DUMP ==\n"
		"%s, STATUS 0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, T %5ld\n",
		sh->hw_running ? "running" : "idle ",
		gc_readl(priv, REG_GCISCR),
		gc_readl(priv, REG_GCRBBR),
		gc_readl(priv, REG_GCRBLR),
		(gc_readl(priv, REG_GCRBEXHR) - base) / 4,
		(gc_readl(priv, REG_GCRBHR) - base) / 4,
		(gc_readl(priv, REG_GCRBTR) - base) / 4);
}

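/*
 * Move all batch buffers from the running list back onto the free list.
 * Called from the interrupt handler with priv->spinlock held.
 */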
static void
flush_running(struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *running = priv->running;
	struct pxa3xx_gcu_batch *next;

	while (running) {
		next = running->next;
		running->next = priv->free;
		priv->free = running;
		running = next;
	}

	priv->running = NULL;
}

static void
run_ready(struct pxa3xx_gcu_priv *priv)
{
	unsigned int num = 0;
	struct pxa3xx_gcu_shared *shared = priv->shared;
	struct pxa3xx_gcu_batch *ready = priv->ready;

	QDUMP("Start");

	BUG_ON(!ready);

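	/*
	 * Build the command stream in the shared ring buffer: a marker
	 * word, then one (command word, batch physical address) pair per
	 * ready batch, then a closing marker. The command encodings are
	 * specific to the GCU's ring buffer format.
	 */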
	shared->buffer[num++] = 0x05000000;

	while (ready) {
		shared->buffer[num++] = 0x00000001;
		shared->buffer[num++] = ready->phys;
		ready = ready->next;
	}

	shared->buffer[num++] = 0x05000000;
	priv->running = priv->ready;
	priv->ready = priv->ready_last = NULL;
	gc_writel(priv, REG_GCRBLR, 0);
	shared->hw_running = 1;

	/* ring base address */
	gc_writel(priv, REG_GCRBBR, shared->buffer_phys);

	/* ring tail address */
	gc_writel(priv, REG_GCRBTR, shared->buffer_phys + num * 4);

	/* ring length */
	gc_writel(priv, REG_GCRBLR, ((num + 63) & ~63) * 4);
}

static irqreturn_t
pxa3xx_gcu_handle_irq(int irq, void *ctx)
{
	struct pxa3xx_gcu_priv *priv = ctx;
	struct pxa3xx_gcu_shared *shared = priv->shared;
	u32 status = gc_readl(priv, REG_GCISCR) & IE_ALL;

	QDUMP("-Interrupt");

	if (!status)
		return IRQ_NONE;

	spin_lock(&priv->spinlock);
	shared->num_interrupts++;

	if (status & IE_EEOB) {
		QDUMP(" [EEOB]");

		flush_running(priv);
		wake_up_all(&priv->wait_free);

		if (priv->ready) {
			run_ready(priv);
		} else {
			/* There is no more data prepared by the userspace.
			 * Set hw_running = 0 and wait for the next userspace
			 * kick-off */
			shared->num_idle++;
			shared->hw_running = 0;

			QDUMP(" '-> Idle.");

			/* set ring buffer length to zero */
			gc_writel(priv, REG_GCRBLR, 0);

			wake_up_all(&priv->wait_idle);
		}

		shared->num_done++;
	} else {
		QERROR(" [???]");
		dump_whole_state(priv);
	}

	/* Clear the interrupt */
	gc_writel(priv, REG_GCISCR, status);
	spin_unlock(&priv->spinlock);

	return IRQ_HANDLED;
}

static int
pxa3xx_gcu_wait_idle(struct pxa3xx_gcu_priv *priv)
{
	int ret = 0;

	QDUMP("Waiting for idle...");

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_wait_idle++;

	while (priv->shared->hw_running) {
		int num = priv->shared->num_interrupts;
		u32 rbexhr = gc_readl(priv, REG_GCRBEXHR);

		ret = wait_event_interruptible_timeout(priv->wait_idle,
					!priv->shared->hw_running, HZ*4);

		if (ret != 0)
			break;

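		/*
		 * The wait timed out. If the execution head has not moved
		 * and no new interrupt has been counted in the meantime,
		 * the hardware made no progress, so give up. Otherwise,
		 * keep waiting.
		 */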
		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr &&
		    priv->shared->num_interrupts == num) {
			QERROR("TIMEOUT");
			ret = -ETIMEDOUT;
			break;
		}
	}

	QDUMP("done");

	return ret;
}

static int
pxa3xx_gcu_wait_free(struct pxa3xx_gcu_priv *priv)
{
	int ret = 0;

	QDUMP("Waiting for free...");

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_wait_free++;

	while (!priv->free) {
		u32 rbexhr = gc_readl(priv, REG_GCRBEXHR);

		ret = wait_event_interruptible_timeout(priv->wait_free,
					priv->free, HZ*4);

		if (ret < 0)
			break;

		if (ret > 0)
			continue;

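		/*
		 * Timed out without a buffer becoming free. If the
		 * execution head has not advanced either, assume the
		 * hardware is stuck and give up.
		 */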
		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr) {
			QERROR("TIMEOUT");
			ret = -ETIMEDOUT;
			break;
		}
	}

	QDUMP("done");

	return ret;
}

/* Misc device layer */

static inline struct pxa3xx_gcu_priv *to_pxa3xx_gcu_priv(struct file *file)
{
	struct miscdevice *dev = file->private_data;
	return container_of(dev, struct pxa3xx_gcu_priv, misc_dev);
}

/*
 * provide an empty .open callback, so the core sets file->private_data
 * for us.
 */
static int pxa3xx_gcu_open(struct inode *inode, struct file *file)
{
	return 0;
}

static ssize_t
pxa3xx_gcu_write(struct file *file, const char *buff,
		 size_t count, loff_t *offp)
{
	int ret;
	unsigned long flags;
	struct pxa3xx_gcu_batch *buffer;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	size_t words = count / 4;
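	/* The GCU consumes 32-bit words; any trailing partial word in the
	 * request is ignored, and words * 4 bytes are reported as written. */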

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_writes++;
	priv->shared->num_words += words;

	/* Last word reserved for batch buffer end command */
	if (words >= PXA3XX_GCU_BATCH_WORDS)
		return -E2BIG;

	/* Wait for a free buffer */
	if (!priv->free) {
		ret = pxa3xx_gcu_wait_free(priv);
		if (ret < 0)
			return ret;
	}

	/*
	 * Get buffer from free list
	 */
	spin_lock_irqsave(&priv->spinlock, flags);
	buffer = priv->free;
	priv->free = buffer->next;
	spin_unlock_irqrestore(&priv->spinlock, flags);

	/* Copy data from user into buffer */
	ret = copy_from_user(buffer->ptr, buff, words * 4);
	if (ret) {
		spin_lock_irqsave(&priv->spinlock, flags);
		buffer->next = priv->free;
		priv->free = buffer;
		spin_unlock_irqrestore(&priv->spinlock, flags);
		return -EFAULT;
	}

	buffer->length = words;

	/* Append batch buffer end command */
	buffer->ptr[words] = 0x01000000;

	/*
	 * Add buffer to ready list
	 */
	spin_lock_irqsave(&priv->spinlock, flags);

	buffer->next = NULL;

	if (priv->ready) {
		BUG_ON(priv->ready_last == NULL);

		priv->ready_last->next = buffer;
	} else
		priv->ready = buffer;

	priv->ready_last = buffer;

	if (!priv->shared->hw_running)
		run_ready(priv);

	spin_unlock_irqrestore(&priv->spinlock, flags);

	return words * 4;
}

static long
pxa3xx_gcu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	unsigned long flags;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	switch (cmd) {
	case PXA3XX_GCU_IOCTL_RESET:
		spin_lock_irqsave(&priv->spinlock, flags);
		pxa3xx_gcu_reset(priv);
		spin_unlock_irqrestore(&priv->spinlock, flags);
		return 0;

	case PXA3XX_GCU_IOCTL_WAIT_IDLE:
		return pxa3xx_gcu_wait_idle(priv);
	}

	return -ENOSYS;
}

static int
pxa3xx_gcu_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int size = vma->vm_end - vma->vm_start;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	switch (vma->vm_pgoff) {
	case 0:
		/* hand out the shared data area */
		if (size != SHARED_SIZE)
			return -EINVAL;

		return dma_mmap_coherent(priv->dev, vma,
			priv->shared, priv->shared_phys, size);

	case SHARED_SIZE >> PAGE_SHIFT:
		/* hand out the MMIO base for direct register access
		 * from userspace */
		if (size != resource_size(priv->resource_mem))
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		return io_remap_pfn_range(vma, vma->vm_start,
				priv->resource_mem->start >> PAGE_SHIFT,
				size, vma->vm_page_prot);
	}

	return -EINVAL;
}

#ifdef PXA3XX_GCU_DEBUG_TIMER
static struct timer_list pxa3xx_gcu_debug_timer;
static struct pxa3xx_gcu_priv *debug_timer_priv;

static void pxa3xx_gcu_debug_timedout(struct timer_list *unused)
{
	struct pxa3xx_gcu_priv *priv = debug_timer_priv;

	QERROR("Timer DUMP");

	mod_timer(&pxa3xx_gcu_debug_timer, jiffies + 5 * HZ);
}

static void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv)
{
	/* init the timer structure */
	debug_timer_priv = priv;
	timer_setup(&pxa3xx_gcu_debug_timer, pxa3xx_gcu_debug_timedout, 0);
	pxa3xx_gcu_debug_timedout(NULL);
}
#else
static inline void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv) {}
#endif

static int
pxa3xx_gcu_add_buffer(struct device *dev,
		      struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *buffer;

	buffer = kzalloc(sizeof(struct pxa3xx_gcu_batch), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->ptr = dma_alloc_coherent(dev, PXA3XX_GCU_BATCH_WORDS * 4,
					 &buffer->phys, GFP_KERNEL);
	if (!buffer->ptr) {
		kfree(buffer);
		return -ENOMEM;
	}

	buffer->next = priv->free;
	priv->free = buffer;

	return 0;
}

static void
pxa3xx_gcu_free_buffers(struct device *dev,
			struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *next, *buffer = priv->free;

	while (buffer) {
		next = buffer->next;

		dma_free_coherent(dev, PXA3XX_GCU_BATCH_WORDS * 4,
				  buffer->ptr, buffer->phys);

		kfree(buffer);
		buffer = next;
	}

	priv->free = NULL;
}

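/*
 * Rough sketch of how a userspace client (normally the DirectFB module
 * mentioned above) is expected to drive these file operations; names and
 * ordering are illustrative only, error handling omitted:
 *
 *	int fd = open("/dev/" DRV_NAME, O_RDWR);
 *	struct pxa3xx_gcu_shared *shared =
 *		mmap(NULL, SHARED_SIZE, PROT_READ | PROT_WRITE,
 *		     MAP_SHARED, fd, 0);
 *	write(fd, cmds, n_words * 4);		   queue one batch buffer
 *	ioctl(fd, PXA3XX_GCU_IOCTL_WAIT_IDLE);	   wait until the GCU is idle
 */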
static const struct file_operations pxa3xx_gcu_miscdev_fops = {
	.owner = THIS_MODULE,
	.open = pxa3xx_gcu_open,
	.write = pxa3xx_gcu_write,
	.unlocked_ioctl = pxa3xx_gcu_ioctl,
	.mmap = pxa3xx_gcu_mmap,
};

static int pxa3xx_gcu_probe(struct platform_device *pdev)
{
	int i, ret, irq;
	struct resource *r;
	struct pxa3xx_gcu_priv *priv;
	struct device *dev = &pdev->dev;

	priv = devm_kzalloc(dev, sizeof(struct pxa3xx_gcu_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	init_waitqueue_head(&priv->wait_idle);
	init_waitqueue_head(&priv->wait_free);
	spin_lock_init(&priv->spinlock);

	/* we allocate the misc device structure as part of our own allocation,
	 * so we can get a pointer to our priv structure later on with
	 * container_of(). This isn't really necessary as we have a fixed minor
	 * number anyway, but this is to avoid statics. */

	priv->misc_dev.minor = PXA3XX_GCU_MINOR;
	priv->misc_dev.name = DRV_NAME;
	priv->misc_dev.fops = &pxa3xx_gcu_miscdev_fops;

	/* handle IO resources */
	priv->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
	if (IS_ERR(priv->mmio_base))
		return PTR_ERR(priv->mmio_base);

	/* enable the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk))
		return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to get clock\n");

	/* request the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, pxa3xx_gcu_handle_irq,
			       0, DRV_NAME, priv);
	if (ret < 0) {
		dev_err(dev, "request_irq failed\n");
		return ret;
	}

	/* allocate dma memory */
	priv->shared = dma_alloc_coherent(dev, SHARED_SIZE,
					  &priv->shared_phys, GFP_KERNEL);
	if (!priv->shared) {
		dev_err(dev, "failed to allocate DMA memory\n");
		return -ENOMEM;
	}

	/* register misc device */
	ret = misc_register(&priv->misc_dev);
	if (ret < 0) {
		dev_err(dev, "misc_register() for minor %d failed\n",
			PXA3XX_GCU_MINOR);
		goto err_free_dma;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable clock\n");
		goto err_misc_deregister;
	}

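	/* pre-allocate a fixed pool of eight batch buffers */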
	for (i = 0; i < 8; i++) {
		ret = pxa3xx_gcu_add_buffer(dev, priv);
		if (ret) {
			pxa3xx_gcu_free_buffers(dev, priv);
			dev_err(dev, "failed to allocate DMA memory\n");
			goto err_disable_clk;
		}
	}

	platform_set_drvdata(pdev, priv);
	priv->resource_mem = r;
	priv->dev = dev;
	pxa3xx_gcu_reset(priv);
	pxa3xx_gcu_init_debug_timer(priv);

	dev_info(dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n",
			(void *) r->start, (void *) priv->shared_phys,
			SHARED_SIZE, irq);
	return 0;

err_disable_clk:
	clk_disable_unprepare(priv->clk);

err_misc_deregister:
	misc_deregister(&priv->misc_dev);

err_free_dma:
	dma_free_coherent(dev, SHARED_SIZE,
			  priv->shared, priv->shared_phys);

	return ret;
}

static void pxa3xx_gcu_remove(struct platform_device *pdev)
{
	struct pxa3xx_gcu_priv *priv = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	pxa3xx_gcu_wait_idle(priv);
	misc_deregister(&priv->misc_dev);
	dma_free_coherent(dev, SHARED_SIZE, priv->shared, priv->shared_phys);
	clk_disable_unprepare(priv->clk);
	pxa3xx_gcu_free_buffers(dev, priv);
}

#ifdef CONFIG_OF
static const struct of_device_id pxa3xx_gcu_of_match[] = {
	{ .compatible = "marvell,pxa300-gcu", },
	{ }
};
MODULE_DEVICE_TABLE(of, pxa3xx_gcu_of_match);
#endif

static struct platform_driver pxa3xx_gcu_driver = {
	.probe = pxa3xx_gcu_probe,
	.remove_new = pxa3xx_gcu_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(pxa3xx_gcu_of_match),
	},
};

module_platform_driver(pxa3xx_gcu_driver);

MODULE_DESCRIPTION("PXA3xx graphics controller unit driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(PXA3XX_GCU_MINOR);
MODULE_AUTHOR("Janine Kropp <nin@directfb.org>, "
	      "Denis Oliver Kropp <dok@directfb.org>, "
	      "Daniel Mack <daniel@caiaq.de>");