xref: /openbmc/linux/drivers/mmc/host/pxamci.c (revision 96de0e252cedffad61b3cb5e05662c591898e69a)
1 /*
2  *  linux/drivers/mmc/host/pxa.c - PXA MMCI driver
3  *
4  *  Copyright (C) 2003 Russell King, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  *  This hardware is really sick:
11  *   - No way to clear interrupts.
12  *   - Have to turn off the clock whenever we touch the device.
13  *   - Doesn't tell you how many data blocks were transferred.
14  *  Yuck!
15  *
16  *	1 and 3 byte data transfers not supported
17  *	max block length up to 1023
18  */
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/ioport.h>
22 #include <linux/platform_device.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/clk.h>
27 #include <linux/err.h>
28 #include <linux/mmc/host.h>
29 
30 #include <asm/dma.h>
31 #include <asm/io.h>
32 #include <asm/scatterlist.h>
33 #include <asm/sizes.h>
34 
35 #include <asm/arch/pxa-regs.h>
36 #include <asm/arch/mmc.h>
37 
38 #include "pxamci.h"
39 
40 #define DRIVER_NAME	"pxa2xx-mci"
41 
42 #define NR_SG	1
43 
44 struct pxamci_host {
45 	struct mmc_host		*mmc;
46 	spinlock_t		lock;
47 	struct resource		*res;
48 	void __iomem		*base;
49 	struct clk		*clk;
50 	unsigned long		clkrate;
51 	int			irq;
52 	int			dma;
53 	unsigned int		clkrt;
54 	unsigned int		cmdat;
55 	unsigned int		imask;
56 	unsigned int		power_mode;
57 	struct pxamci_platform_data *pdata;
58 
59 	struct mmc_request	*mrq;
60 	struct mmc_command	*cmd;
61 	struct mmc_data		*data;
62 
63 	dma_addr_t		sg_dma;
64 	struct pxa_dma_desc	*sg_cpu;
65 	unsigned int		dma_len;
66 
67 	unsigned int		dma_dir;
68 };
69 
/*
 * Stop the MMC controller clock.
 *
 * STOP_CLOCK is not instantaneous: poll MMC_STAT until the clock-enable
 * bit drops, bounded by a ~10ms busy-wait, and log an error if the
 * hardware never acknowledges the stop.
 */
static void pxamci_stop_clock(struct pxamci_host *host)
{
	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
		unsigned long timeout = 10000;
		unsigned int v;

		writel(STOP_CLOCK, host->base + MMC_STRPCL);

		do {
			v = readl(host->base + MMC_STAT);
			if (!(v & STAT_CLK_EN))
				break;
			udelay(1);
		} while (timeout--);

		/* v holds the last status read; still enabled == timed out */
		if (v & STAT_CLK_EN)
			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
	}
}
89 
90 static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
91 {
92 	unsigned long flags;
93 
94 	spin_lock_irqsave(&host->lock, flags);
95 	host->imask &= ~mask;
96 	writel(host->imask, host->base + MMC_I_MASK);
97 	spin_unlock_irqrestore(&host->lock, flags);
98 }
99 
100 static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
101 {
102 	unsigned long flags;
103 
104 	spin_lock_irqsave(&host->lock, flags);
105 	host->imask |= mask;
106 	writel(host->imask, host->base + MMC_I_MASK);
107 	spin_unlock_irqrestore(&host->lock, flags);
108 }
109 
/*
 * Program the controller and the PXA DMA engine for a data transfer.
 *
 * Writes the block count/length and read timeout registers, routes the
 * MMC DMA request lines (DRCMR) to our channel, builds a descriptor
 * chain in the coherent sg_cpu buffer (one descriptor per mapped sg
 * entry) and starts the channel.  The data phase itself begins when the
 * command is issued with CMDAT_DMAEN set.
 */
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned long long clks;
	unsigned int timeout;
	u32 dcmd;
	int i;

	host->data = data;

	/* Stream transfers have no block count; use the register maximum. */
	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	writel(nob, host->base + MMC_NOB);
	writel(data->blksz, host->base + MMC_BLKLEN);

	/*
	 * Convert the ns timeout to MMC controller clocks; timeout_clks is
	 * in card clocks, so scale it by the divisor (1 << clkrt).  The
	 * MMC_RDTO register counts in units of 256 clocks — round up.
	 */
	clks = (unsigned long long)data->timeout_ns * host->clkrate;
	do_div(clks, 1000000000UL);
	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		dcmd = DCMD_INCTRGADDR | DCMD_FLOWTRG;
		DRCMRTXMMC = 0;
		DRCMRRXMMC = host->dma | DRCMR_MAPVLD;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		dcmd = DCMD_INCSRCADDR | DCMD_FLOWSRC;
		DRCMRRXMMC = 0;
		DRCMRTXMMC = host->dma | DRCMR_MAPVLD;
	}

	dcmd |= DCMD_BURST32 | DCMD_WIDTH1;

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				   host->dma_dir);

	for (i = 0; i < host->dma_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		host->sg_cpu[i].dcmd = dcmd | length;
		/*
		 * Writes that are not a multiple of the 32-byte burst need
		 * an end-of-descriptor interrupt so pxamci_dma_irq() can
		 * flush the partially filled FIFO via BUF_PART_FULL.
		 */
		if (length & 31 && !(data->flags & MMC_DATA_READ))
			host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
		if (data->flags & MMC_DATA_READ) {
			host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
			host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
		} else {
			host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
			host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
		}
		/* Chain each descriptor to the next one. */
		host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
					sizeof(struct pxa_dma_desc);
	}
	/* Terminate the chain and make it visible before starting DMA. */
	host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
	wmb();

	DDADR(host->dma) = host->sg_dma;
	DCSR(host->dma) = DCSR_RUN;
}
169 
/*
 * Issue an MMC command.
 *
 * Translates the core's response flags into the controller's CMDAT
 * response-format bits, programs opcode/argument (the 32-bit argument
 * is split across the 16-bit ARGH/ARGL registers), (re)starts the bus
 * clock and unmasks the end-of-command interrupt.  Completion is
 * handled in pxamci_cmd_done().
 */
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;

/* Compare response formats ignoring the busy/opcode bits. */
#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		/* no response expected */
		break;
	}

	writel(cmd->opcode, host->base + MMC_CMD);
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);

	writel(START_CLOCK, host->base + MMC_STRPCL);

	pxamci_enable_irq(host, END_CMD_RES);
}
203 
204 static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
205 {
206 	host->mrq = NULL;
207 	host->cmd = NULL;
208 	host->data = NULL;
209 	mmc_request_done(host->mmc, mrq);
210 }
211 
/*
 * Handle end-of-command: read back the response, record any error, and
 * either continue with the data phase or complete the request.
 *
 * Returns 1 if a command was in flight and handled, 0 otherwise.
 */
static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick.  We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 *
	 * The response FIFO delivers 16 bits per read; the bytes are
	 * re-packed below into the four 32-bit resp[] words.
	 */
	v = readl(host->base + MMC_RES) & 0xffff;
	for (i = 0; i < 4; i++) {
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;	/* low byte of w2 carries into the next word */
	}

	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = -ETIMEDOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
#ifdef CONFIG_PXA27x
		/*
		 * workaround for erratum #42:
		 * Intel PXA27x Family Processor Specification Update Rev 001
		 * A bogus CRC error can appear if the msb of a 136 bit
		 * response is a one.
		 */
		if (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000) {
			pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
		} else
#endif
		cmd->error = -EILSEQ;
	}

	pxamci_disable_irq(host, END_CMD_RES);
	if (host->data && !cmd->error) {
		/* Data phase follows; wait for DATA_TRAN_DONE. */
		pxamci_enable_irq(host, DATA_TRAN_DONE);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
261 
262 static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
263 {
264 	struct mmc_data *data = host->data;
265 
266 	if (!data)
267 		return 0;
268 
269 	DCSR(host->dma) = 0;
270 	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
271 		     host->dma_dir);
272 
273 	if (stat & STAT_READ_TIME_OUT)
274 		data->error = -ETIMEDOUT;
275 	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
276 		data->error = -EILSEQ;
277 
278 	/*
279 	 * There appears to be a hardware design bug here.  There seems to
280 	 * be no way to find out how much data was transferred to the card.
281 	 * This means that if there was an error on any block, we mark all
282 	 * data blocks as being in error.
283 	 */
284 	if (!data->error)
285 		data->bytes_xfered = data->blocks * data->blksz;
286 	else
287 		data->bytes_xfered = 0;
288 
289 	pxamci_disable_irq(host, DATA_TRAN_DONE);
290 
291 	host->data = NULL;
292 	if (host->mrq->stop) {
293 		pxamci_stop_clock(host);
294 		pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
295 	} else {
296 		pxamci_finish_request(host, host->mrq);
297 	}
298 
299 	return 1;
300 }
301 
/*
 * Controller interrupt handler: dispatch command-done, data-done and
 * SDIO card interrupts.  Only unmasked sources are considered (MMC_I_REG
 * masked with the inverse of MMC_I_MASK).
 */
static irqreturn_t pxamci_irq(int irq, void *devid)
{
	struct pxamci_host *host = devid;
	unsigned int ireg;
	int handled = 0;

	ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);

	if (ireg) {
		unsigned stat = readl(host->base + MMC_STAT);

		pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);

		if (ireg & END_CMD_RES)
			handled |= pxamci_cmd_done(host, stat);
		if (ireg & DATA_TRAN_DONE)
			handled |= pxamci_data_done(host, stat);
		if (ireg & SDIO_INT) {
			mmc_signal_sdio_irq(host->mmc);
			handled = 1;
		}
	}

	return IRQ_RETVAL(handled);
}
327 
/*
 * mmc_host_ops.request: start processing a request from the core.
 *
 * The clock must be stopped before reprogramming the controller; the
 * CMDAT_INIT bit (80-clock initialisation stream, set by set_ios on
 * power-on) is consumed by the first command only.
 */
static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct pxamci_host *host = mmc_priv(mmc);
	unsigned int cmdat;

	WARN_ON(host->mrq != NULL);

	host->mrq = mrq;

	pxamci_stop_clock(host);

	cmdat = host->cmdat;
	host->cmdat &= ~CMDAT_INIT;	/* one-shot: only the first command */

	if (mrq->data) {
		pxamci_setup_data(host, mrq->data);

		cmdat &= ~CMDAT_BUSY;
		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
		if (mrq->data->flags & MMC_DATA_WRITE)
			cmdat |= CMDAT_WRITE;

		if (mrq->data->flags & MMC_DATA_STREAM)
			cmdat |= CMDAT_STREAM;
	}

	pxamci_start_cmd(host, mrq->cmd, cmdat);
}
356 
357 static int pxamci_get_ro(struct mmc_host *mmc)
358 {
359 	struct pxamci_host *host = mmc_priv(mmc);
360 
361 	if (host->pdata && host->pdata->get_ro)
362 		return host->pdata->get_ro(mmc_dev(mmc));
363 	/* Host doesn't support read only detection so assume writeable */
364 	return 0;
365 }
366 
/*
 * mmc_host_ops.set_ios: apply clock rate, power mode and bus width.
 *
 * The controller divides the host clock by a power of two (1 << clkrt);
 * clkrt itself is only written to the hardware on the next command
 * (see pxamci_start_cmd).
 */
static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (ios->clock) {
		unsigned long rate = host->clkrate;
		unsigned int clk = rate / ios->clock;

		/*
		 * clk might result in a lower divisor than we
		 * desire.  check for that condition and adjust
		 * as appropriate.
		 */
		if (rate / clk > ios->clock)
			clk <<= 1;
		/* clkrt = log2 of the divisor, rounded down */
		host->clkrt = fls(clk) - 1;
		clk_enable(host->clk);

		/*
		 * we write clkrt on the next command
		 */
	} else {
		pxamci_stop_clock(host);
		clk_disable(host->clk);
	}

	if (host->power_mode != ios->power_mode) {
		host->power_mode = ios->power_mode;

		if (host->pdata && host->pdata->setpower)
			host->pdata->setpower(mmc_dev(mmc), ios->vdd);

		/* Send the 80-clock init stream with the first command. */
		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMDAT_INIT;
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->cmdat |= CMDAT_SD_4DAT;
	else
		host->cmdat &= ~CMDAT_SD_4DAT;

	pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
		 host->clkrt, host->cmdat);
}
411 
412 static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
413 {
414 	struct pxamci_host *pxa_host = mmc_priv(host);
415 
416 	if (enable)
417 		pxamci_enable_irq(pxa_host, SDIO_INT);
418 	else
419 		pxamci_disable_irq(pxa_host, SDIO_INT);
420 }
421 
/* MMC core callbacks implemented by this driver. */
static const struct mmc_host_ops pxamci_ops = {
	.request		= pxamci_request,
	.get_ro			= pxamci_get_ro,
	.set_ios		= pxamci_set_ios,
	.enable_sdio_irq	= pxamci_enable_sdio_irq,
};
428 
/*
 * DMA channel callback.
 *
 * An end-of-descriptor interrupt (requested in pxamci_setup_data() for
 * writes that aren't a multiple of the 32-byte burst) means the FIFO is
 * only partially filled: tell the controller to transmit the partial
 * buffer via BUF_PART_FULL.  Any other interrupt reason is a DMA error
 * and fails the whole data transfer.
 */
static void pxamci_dma_irq(int dma, void *devid)
{
	struct pxamci_host *host = devid;
	int dcsr = DCSR(dma);
	DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;	/* ack, keep channel config */

	if (dcsr & DCSR_ENDINTR) {
		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
	} else {
		printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
		       mmc_hostname(host->mmc), dma, dcsr);
		host->data->error = -EIO;
		pxamci_data_done(host, 0);
	}
}
444 
445 static irqreturn_t pxamci_detect_irq(int irq, void *devid)
446 {
447 	struct pxamci_host *host = mmc_priv(devid);
448 
449 	mmc_detect_change(devid, host->pdata->detect_delay);
450 	return IRQ_HANDLED;
451 }
452 
/*
 * Probe: claim MMIO, IRQ, clock and DMA resources, configure the
 * mmc_host limits imposed by this controller, quiesce the hardware,
 * and register with the MMC core.  All failures unwind through the
 * shared "out" label, which frees only what was acquired.
 */
static int pxamci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct pxamci_host *host = NULL;
	struct resource *r;
	int ret, irq;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq < 0)
		return -ENXIO;

	r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
	if (!r)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	mmc->ops = &pxamci_ops;

	/*
	 * We can do SG-DMA, but we don't because we never know how much
	 * data we successfully wrote to the card.
	 */
	mmc->max_phys_segs = NR_SG;

	/*
	 * Our hardware DMA can handle a maximum of one page per SG entry.
	 */
	mmc->max_seg_size = PAGE_SIZE;

	/*
	 * Block length register is only 10 bits before PXA27x.
	 */
	mmc->max_blk_size = (cpu_is_pxa21x() || cpu_is_pxa25x()) ? 1023 : 2048;

	/*
	 * Block count register is 16 bits.
	 */
	mmc->max_blk_count = 65535;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dma = -1;		/* not yet allocated; checked on error path */
	host->pdata = pdev->dev.platform_data;

	host->clk = clk_get(&pdev->dev, "MMCCLK");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;	/* error path frees only valid clocks */
		goto out;
	}

	host->clkrate = clk_get_rate(host->clk);

	/*
	 * Calculate minimum clock rate, rounding up.
	 * (64 is the controller's largest clock divisor.)
	 */
	mmc->f_min = (host->clkrate + 63) / 64;
	mmc->f_max = host->clkrate;

	mmc->ocr_avail = host->pdata ?
			 host->pdata->ocr_mask :
			 MMC_VDD_32_33|MMC_VDD_33_34;
	mmc->caps = 0;
	host->cmdat = 0;
	/* 4-bit data and SDIO interrupts only exist on PXA26x/27x+. */
	if (!cpu_is_pxa21x() && !cpu_is_pxa25x()) {
		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
		host->cmdat |= CMDAT_SDIO_INT_EN;
	}

	/* Coherent buffer holding the DMA descriptor chain. */
	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_init(&host->lock);
	host->res = r;
	host->irq = irq;
	host->imask = MMC_I_MASK_ALL;	/* everything masked until needed */

	host->base = ioremap(r->start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Ensure that the host controller is shut down, and setup
	 * with our defaults.
	 */
	pxamci_stop_clock(host);
	writel(0, host->base + MMC_SPI);
	writel(64, host->base + MMC_RESTO);
	writel(host->imask, host->base + MMC_I_MASK);

	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
				    pxamci_dma_irq, host);
	if (host->dma < 0) {
		ret = -EBUSY;
		goto out;
	}

	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, mmc);

	/* Let the platform hook up card-detect etc. */
	if (host->pdata && host->pdata->init)
		host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);

	mmc_add_host(mmc);

	return 0;

 out:
	if (host) {
		if (host->dma >= 0)
			pxa_free_dma(host->dma);
		if (host->base)
			iounmap(host->base);
		if (host->sg_cpu)
			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
		if (host->clk)
			clk_put(host->clk);
	}
	if (mmc)
		mmc_free_host(mmc);
	release_resource(r);
	return ret;
}
590 
/*
 * Remove: unregister from the MMC core, quiesce the controller (stop
 * the clock, mask interrupts, unroute the DMA request lines) and
 * release every resource acquired in probe, in reverse order.
 */
static int pxamci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct pxamci_host *host = mmc_priv(mmc);

		/* Platform teardown (card-detect IRQ etc.) first. */
		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&pdev->dev, mmc);

		mmc_remove_host(mmc);

		pxamci_stop_clock(host);
		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
		       host->base + MMC_I_MASK);

		/* Disconnect the MMC DMA request lines from our channel. */
		DRCMRRXMMC = 0;
		DRCMRTXMMC = 0;

		free_irq(host->irq, host);
		pxa_free_dma(host->dma);
		iounmap(host->base);
		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

		clk_put(host->clk);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}
626 
627 #ifdef CONFIG_PM
628 static int pxamci_suspend(struct platform_device *dev, pm_message_t state)
629 {
630 	struct mmc_host *mmc = platform_get_drvdata(dev);
631 	int ret = 0;
632 
633 	if (mmc)
634 		ret = mmc_suspend_host(mmc, state);
635 
636 	return ret;
637 }
638 
/*
 * Legacy platform-bus resume: delegate to the MMC core.
 */
static int pxamci_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);

	return mmc ? mmc_resume_host(mmc) : 0;
}
649 #else
650 #define pxamci_suspend	NULL
651 #define pxamci_resume	NULL
652 #endif
653 
/* Platform driver glue; suspend/resume are NULL when !CONFIG_PM. */
static struct platform_driver pxamci_driver = {
	.probe		= pxamci_probe,
	.remove		= pxamci_remove,
	.suspend	= pxamci_suspend,
	.resume		= pxamci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};
663 
/* Module entry point: register the platform driver. */
static int __init pxamci_init(void)
{
	return platform_driver_register(&pxamci_driver);
}
668 
/* Module exit point: unregister the platform driver. */
static void __exit pxamci_exit(void)
{
	platform_driver_unregister(&pxamci_driver);
}
673 
674 module_init(pxamci_init);
675 module_exit(pxamci_exit);
676 
677 MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
678 MODULE_LICENSE("GPL");
679