/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

/* Default maximum operating frequency (Hz), overridable via the "fmax" module parameter */
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size is at bits 16..30 of the
 *		      datactrl register
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
};

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	writel(clk, host->base + MMCICLOCK);
}
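
/*
 * Worked example for the two divider formulas above, assuming
 * mclk = 100 MHz and a desired rate of 400 kHz: the ST divider is
 * DIV_ROUND_UP(100000000, 400000) - 2 = 248, giving
 * cclk = 100000000 / (248 + 2) = 400 kHz, while the PL180 divider is
 * 100000000 / (2 * 400000) - 1 = 124, giving
 * cclk = 100000000 / (2 * (124 + 1)) = 400 kHz.
 */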

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

/*
 * On single-IRQ variants the PIO interrupt sources must also be
 * enabled in MMCIMASK0, since both halves share one interrupt line.
 */
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	/* The iterator is driven from IRQ context, so atomic kmaps are needed */
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it is
	 * specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g. if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}

/*
 * This is used in __devinit or __devexit so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);
		if (!data->error)
			data->error = -EIO;
	}

	if (data->flags & MMC_DATA_WRITE)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;

	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
}

static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
			      struct mmci_host_next *next)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;

	/* Check if next job is already prepared */
	if (data->host_cookie && !next &&
	    host->dma_current && host->dma_desc_current)
		return 0;

	if (!next) {
		host->dma_current = NULL;
		host->dma_desc_current = NULL;
	}

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	if (next) {
		next->dma_chan = chan;
		next->dma_desc = desc;
	} else {
		host->dma_current = chan;
		host->dma_desc_current = desc;
	}

	return 0;

 unmap_exit:
	if (!next)
		dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
	return -ENOMEM;
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data, NULL);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire the next DMA request.  When that happens, MMCI will
	 * call mmci_data_irq().
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}
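
/*
 * The MMC core can prepare the next request while the current one is
 * still in flight: mmci_pre_request() maps the scatterlist and parks
 * the DMA descriptor in host->next_data, tagged with a non-zero
 * cookie.  mmci_get_next_data() below promotes that descriptor to
 * "current" when the request is actually started, and
 * mmci_post_request() unmaps the buffers afterwards.
 */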

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	if (data->host_cookie && data->host_cookie != next->cookie) {
		printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
		       " host->next_data.cookie %d\n",
		       __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	if (!data->host_cookie)
		return;

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;

	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	/* If configured for DMA, prepare (map) the data up front */
	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
		if (mmci_dma_prep_data(host, data, nd))
			data->host_cookie = 0;
		else
			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
	}
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (!data)
		return;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If configured for DMA, unmap the data and clear the cookie */
	if (chan) {
		if (err)
			dmaengine_terminate_all(chan);
		if (err || data->host_cookie)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, dir);
		mrq->data->host_cookie = 0;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif
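
/*
 * mmci_start_data() programs the data timeout in card clock cycles.
 * As a worked example, with cclk = 26 MHz (an assumed rate) and
 * data->timeout_ns = 100 ms, the conversion below gives
 * 100000000 * 26000000 / 1000000000 = 2600000 cycles, to which
 * data->timeout_clks is added.
 */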
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/*
	 * Attempt to use DMA operation mode; if this
	 * fails, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card))
			datactrl |= MCI_ST_DPSM_SDIOEN;

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host))
			mmci_dma_data_error(host);

		/*
		 * Calculate how far we are into the transfer.  Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side.  On reads, this
		 * can be as much as a FIFO-worth of data ahead.  This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_unmap(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		/*
		 * MMCIFIFOCNT holds the number of words still to be
		 * transferred, so the bytes already sitting in the FIFO
		 * are the remaining byte count minus that.
		 */
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * On the ST Micro variant, SDIO transfers of fewer
		 * than 8 bytes need the clock H/W flow
		 * control disabled.
		 */
		if (variant->sdio && host->mmc->card &&
		    mmc_card_sdio(host->mmc->card)) {
			if (count < 8)
				writel(readl(host->base + MMCICLOCK) &
				       ~variant->clkreg_enable,
				       host->base + MMCICLOCK);
			else
				writel(readl(host->base + MMCICLOCK) |
				       variant->clkreg_enable,
				       host->base + MMCICLOCK);
		}

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte becomes a 32-bit write, 7 bytes will be two
		 * 32-bit writes etc.
		 */
		writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct returns void, and failing to set the
				 * power should be rare so we print an error
				 * and return here.
				 */
				return;
			}
		}
		if (host->plat->vdd_handler)
			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
						       ios->power_mode);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
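
/*
 * Report the write-protect state from the WP GPIO; -ENOSYS means that
 * no write-protect line is wired up on this board.
 */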
static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

static int __devinit mmci_probe(struct amba_device *dev,
				const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is at most 100 MHz,
	 * so we try to adjust the clock down to this (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	mmc->f_min = (host->mclk + 511) / 512; /* worst-case PL180 divider: mclk / (2 * 256) */
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence.  Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified.  Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
					 "Provided ocr_mask/setpower will not be used "
					 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;

	/*
	 * We can do scatter/gather I/O.
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num - 1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq,
					      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					      DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (dev->irq[1] == NO_IRQ)
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	mmc_add_host(mmc);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
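
/* Describe the "fmax" parameter declared above (it caps mmc->f_max) */
MODULE_PARM_DESC(fmax, "Maximum card clock frequency in Hz (default 515633)");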