/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

/*
 * Fallback maximum card clock frequency (Hz), used when the platform
 * data does not supply f_max.  Overridable as a module parameter (see
 * module_param() at the bottom of this file).
 */
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
};

/*
 * Program the MMCICLOCK register for the @desired bus frequency (Hz),
 * updating host->cclk with the rate actually achieved.
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			/* Desired rate at or above mclk: bypass the divider */
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	writel(clk, host->base + MMCICLOCK);
}

/*
 * Finish off a request: clear the command register and hand the
 * completed request back to the MMC core.  Called with host->lock held.
 */
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	/* Data must already have been torn down by mmci_stop_data() */
	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

/*
 * Program the PIO (MASK1) interrupt mask.  On single-IRQ hardware the
 * MASK1 interrupts are delivered through the MASK0 line, so the bits
 * must be mirrored into MASK0 as well.
 */
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

/* Disable the data path, mask PIO IRQs and forget the current transfer. */
static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

/* Set up the scatter-gather iterator used by the PIO transfer path. */
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	/* ATOMIC: the iterator is advanced from IRQ context (mmci_pio_irq) */
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
203 */ 204 #ifdef CONFIG_DMA_ENGINE 205 static void __devinit mmci_dma_setup(struct mmci_host *host) 206 { 207 struct mmci_platform_data *plat = host->plat; 208 const char *rxname, *txname; 209 dma_cap_mask_t mask; 210 211 if (!plat || !plat->dma_filter) { 212 dev_info(mmc_dev(host->mmc), "no DMA platform data\n"); 213 return; 214 } 215 216 /* Try to acquire a generic DMA engine slave channel */ 217 dma_cap_zero(mask); 218 dma_cap_set(DMA_SLAVE, mask); 219 220 /* 221 * If only an RX channel is specified, the driver will 222 * attempt to use it bidirectionally, however if it is 223 * is specified but cannot be located, DMA will be disabled. 224 */ 225 if (plat->dma_rx_param) { 226 host->dma_rx_channel = dma_request_channel(mask, 227 plat->dma_filter, 228 plat->dma_rx_param); 229 /* E.g if no DMA hardware is present */ 230 if (!host->dma_rx_channel) 231 dev_err(mmc_dev(host->mmc), "no RX DMA channel\n"); 232 } 233 234 if (plat->dma_tx_param) { 235 host->dma_tx_channel = dma_request_channel(mask, 236 plat->dma_filter, 237 plat->dma_tx_param); 238 if (!host->dma_tx_channel) 239 dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n"); 240 } else { 241 host->dma_tx_channel = host->dma_rx_channel; 242 } 243 244 if (host->dma_rx_channel) 245 rxname = dma_chan_name(host->dma_rx_channel); 246 else 247 rxname = "none"; 248 249 if (host->dma_tx_channel) 250 txname = dma_chan_name(host->dma_tx_channel); 251 else 252 txname = "none"; 253 254 dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n", 255 rxname, txname); 256 257 /* 258 * Limit the maximum segment size in any SG entry according to 259 * the parameters of the DMA engine device. 
260 */ 261 if (host->dma_tx_channel) { 262 struct device *dev = host->dma_tx_channel->device->dev; 263 unsigned int max_seg_size = dma_get_max_seg_size(dev); 264 265 if (max_seg_size < host->mmc->max_seg_size) 266 host->mmc->max_seg_size = max_seg_size; 267 } 268 if (host->dma_rx_channel) { 269 struct device *dev = host->dma_rx_channel->device->dev; 270 unsigned int max_seg_size = dma_get_max_seg_size(dev); 271 272 if (max_seg_size < host->mmc->max_seg_size) 273 host->mmc->max_seg_size = max_seg_size; 274 } 275 } 276 277 /* 278 * This is used in __devinit or __devexit so inline it 279 * so it can be discarded. 280 */ 281 static inline void mmci_dma_release(struct mmci_host *host) 282 { 283 struct mmci_platform_data *plat = host->plat; 284 285 if (host->dma_rx_channel) 286 dma_release_channel(host->dma_rx_channel); 287 if (host->dma_tx_channel && plat->dma_tx_param) 288 dma_release_channel(host->dma_tx_channel); 289 host->dma_rx_channel = host->dma_tx_channel = NULL; 290 } 291 292 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 293 { 294 struct dma_chan *chan = host->dma_current; 295 enum dma_data_direction dir; 296 u32 status; 297 int i; 298 299 /* Wait up to 1ms for the DMA to complete */ 300 for (i = 0; ; i++) { 301 status = readl(host->base + MMCISTATUS); 302 if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100) 303 break; 304 udelay(10); 305 } 306 307 /* 308 * Check to see whether we still have some data left in the FIFO - 309 * this catches DMA controllers which are unable to monitor the 310 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non- 311 * contiguous buffers. On TX, we'll get a FIFO underrun error. 
312 */ 313 if (status & MCI_RXDATAAVLBLMASK) { 314 dmaengine_terminate_all(chan); 315 if (!data->error) 316 data->error = -EIO; 317 } 318 319 if (data->flags & MMC_DATA_WRITE) { 320 dir = DMA_TO_DEVICE; 321 } else { 322 dir = DMA_FROM_DEVICE; 323 } 324 325 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); 326 327 /* 328 * Use of DMA with scatter-gather is impossible. 329 * Give up with DMA and switch back to PIO mode. 330 */ 331 if (status & MCI_RXDATAAVLBLMASK) { 332 dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n"); 333 mmci_dma_release(host); 334 } 335 } 336 337 static void mmci_dma_data_error(struct mmci_host *host) 338 { 339 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); 340 dmaengine_terminate_all(host->dma_current); 341 } 342 343 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 344 { 345 struct variant_data *variant = host->variant; 346 struct dma_slave_config conf = { 347 .src_addr = host->phybase + MMCIFIFO, 348 .dst_addr = host->phybase + MMCIFIFO, 349 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 350 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 351 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ 352 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ 353 }; 354 struct mmc_data *data = host->data; 355 struct dma_chan *chan; 356 struct dma_device *device; 357 struct dma_async_tx_descriptor *desc; 358 int nr_sg; 359 360 host->dma_current = NULL; 361 362 if (data->flags & MMC_DATA_READ) { 363 conf.direction = DMA_FROM_DEVICE; 364 chan = host->dma_rx_channel; 365 } else { 366 conf.direction = DMA_TO_DEVICE; 367 chan = host->dma_tx_channel; 368 } 369 370 /* If there's no DMA channel, fall back to PIO */ 371 if (!chan) 372 return -EINVAL; 373 374 /* If less than or equal to the fifo size, don't bother with DMA */ 375 if (host->size <= variant->fifosize) 376 return -EINVAL; 377 378 device = chan->device; 379 nr_sg = dma_map_sg(device->dev, data->sg, 
data->sg_len, conf.direction); 380 if (nr_sg == 0) 381 return -EINVAL; 382 383 dmaengine_slave_config(chan, &conf); 384 desc = device->device_prep_slave_sg(chan, data->sg, nr_sg, 385 conf.direction, DMA_CTRL_ACK); 386 if (!desc) 387 goto unmap_exit; 388 389 /* Okay, go for it. */ 390 host->dma_current = chan; 391 392 dev_vdbg(mmc_dev(host->mmc), 393 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", 394 data->sg_len, data->blksz, data->blocks, data->flags); 395 dmaengine_submit(desc); 396 dma_async_issue_pending(chan); 397 398 datactrl |= MCI_DPSM_DMAENABLE; 399 400 /* Trigger the DMA transfer */ 401 writel(datactrl, host->base + MMCIDATACTRL); 402 403 /* 404 * Let the MMCI say when the data is ended and it's time 405 * to fire next DMA request. When that happens, MMCI will 406 * call mmci_data_end() 407 */ 408 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, 409 host->base + MMCIMASK0); 410 return 0; 411 412 unmap_exit: 413 dmaengine_terminate_all(chan); 414 dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction); 415 return -ENOMEM; 416 } 417 #else 418 /* Blank functions if the DMA engine is not available */ 419 static inline void mmci_dma_setup(struct mmci_host *host) 420 { 421 } 422 423 static inline void mmci_dma_release(struct mmci_host *host) 424 { 425 } 426 427 static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 428 { 429 } 430 431 static inline void mmci_dma_data_error(struct mmci_host *host) 432 { 433 } 434 435 static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 436 { 437 return -ENOSYS; 438 } 439 #endif 440 441 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) 442 { 443 struct variant_data *variant = host->variant; 444 unsigned int datactrl, timeout, irqmask; 445 unsigned long long clks; 446 void __iomem *base; 447 int blksz_bits; 448 449 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n", 450 data->blksz, data->blocks, 
data->flags); 451 452 host->data = data; 453 host->size = data->blksz * data->blocks; 454 data->bytes_xfered = 0; 455 456 clks = (unsigned long long)data->timeout_ns * host->cclk; 457 do_div(clks, 1000000000UL); 458 459 timeout = data->timeout_clks + (unsigned int)clks; 460 461 base = host->base; 462 writel(timeout, base + MMCIDATATIMER); 463 writel(host->size, base + MMCIDATALENGTH); 464 465 blksz_bits = ffs(data->blksz) - 1; 466 BUG_ON(1 << blksz_bits != data->blksz); 467 468 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; 469 470 if (data->flags & MMC_DATA_READ) 471 datactrl |= MCI_DPSM_DIRECTION; 472 473 /* 474 * Attempt to use DMA operation mode, if this 475 * should fail, fall back to PIO mode 476 */ 477 if (!mmci_dma_start_data(host, datactrl)) 478 return; 479 480 /* IRQ mode, map the SG list for CPU reading/writing */ 481 mmci_init_sg(host, data); 482 483 if (data->flags & MMC_DATA_READ) { 484 irqmask = MCI_RXFIFOHALFFULLMASK; 485 486 /* 487 * If we have less than the fifo 'half-full' threshold to 488 * transfer, trigger a PIO interrupt as soon as any data 489 * is available. 490 */ 491 if (host->size < variant->fifohalfsize) 492 irqmask |= MCI_RXDATAAVLBLMASK; 493 } else { 494 /* 495 * We don't actually need to include "FIFO empty" here 496 * since its implicit in "FIFO half empty". 
497 */ 498 irqmask = MCI_TXFIFOHALFEMPTYMASK; 499 } 500 501 /* The ST Micro variants has a special bit to enable SDIO */ 502 if (variant->sdio && host->mmc->card) 503 if (mmc_card_sdio(host->mmc->card)) 504 datactrl |= MCI_ST_DPSM_SDIOEN; 505 506 writel(datactrl, base + MMCIDATACTRL); 507 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); 508 mmci_set_mask1(host, irqmask); 509 } 510 511 static void 512 mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) 513 { 514 void __iomem *base = host->base; 515 516 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", 517 cmd->opcode, cmd->arg, cmd->flags); 518 519 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { 520 writel(0, base + MMCICOMMAND); 521 udelay(1); 522 } 523 524 c |= cmd->opcode | MCI_CPSM_ENABLE; 525 if (cmd->flags & MMC_RSP_PRESENT) { 526 if (cmd->flags & MMC_RSP_136) 527 c |= MCI_CPSM_LONGRSP; 528 c |= MCI_CPSM_RESPONSE; 529 } 530 if (/*interrupt*/0) 531 c |= MCI_CPSM_INTERRUPT; 532 533 host->cmd = cmd; 534 535 writel(cmd->arg, base + MMCIARGUMENT); 536 writel(c, base + MMCICOMMAND); 537 } 538 539 static void 540 mmci_data_irq(struct mmci_host *host, struct mmc_data *data, 541 unsigned int status) 542 { 543 /* First check for errors */ 544 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { 545 u32 remain, success; 546 547 /* Terminate the DMA transfer */ 548 if (dma_inprogress(host)) 549 mmci_dma_data_error(host); 550 551 /* 552 * Calculate how far we are into the transfer. Note that 553 * the data counter gives the number of bytes transferred 554 * on the MMC bus, not on the host side. On reads, this 555 * can be as much as a FIFO-worth of data ahead. This 556 * matters for FIFO overruns only. 
557 */ 558 remain = readl(host->base + MMCIDATACNT); 559 success = data->blksz * data->blocks - remain; 560 561 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", 562 status, success); 563 if (status & MCI_DATACRCFAIL) { 564 /* Last block was not successful */ 565 success -= 1; 566 data->error = -EILSEQ; 567 } else if (status & MCI_DATATIMEOUT) { 568 data->error = -ETIMEDOUT; 569 } else if (status & MCI_TXUNDERRUN) { 570 data->error = -EIO; 571 } else if (status & MCI_RXOVERRUN) { 572 if (success > host->variant->fifosize) 573 success -= host->variant->fifosize; 574 else 575 success = 0; 576 data->error = -EIO; 577 } 578 data->bytes_xfered = round_down(success, data->blksz); 579 } 580 581 if (status & MCI_DATABLOCKEND) 582 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); 583 584 if (status & MCI_DATAEND || data->error) { 585 if (dma_inprogress(host)) 586 mmci_dma_unmap(host, data); 587 mmci_stop_data(host); 588 589 if (!data->error) 590 /* The error clause is handled above, success! 
*/ 591 data->bytes_xfered = data->blksz * data->blocks; 592 593 if (!data->stop) { 594 mmci_request_end(host, data->mrq); 595 } else { 596 mmci_start_command(host, data->stop, 0); 597 } 598 } 599 } 600 601 static void 602 mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, 603 unsigned int status) 604 { 605 void __iomem *base = host->base; 606 607 host->cmd = NULL; 608 609 if (status & MCI_CMDTIMEOUT) { 610 cmd->error = -ETIMEDOUT; 611 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) { 612 cmd->error = -EILSEQ; 613 } else { 614 cmd->resp[0] = readl(base + MMCIRESPONSE0); 615 cmd->resp[1] = readl(base + MMCIRESPONSE1); 616 cmd->resp[2] = readl(base + MMCIRESPONSE2); 617 cmd->resp[3] = readl(base + MMCIRESPONSE3); 618 } 619 620 if (!cmd->data || cmd->error) { 621 if (host->data) 622 mmci_stop_data(host); 623 mmci_request_end(host, cmd->mrq); 624 } else if (!(cmd->data->flags & MMC_DATA_READ)) { 625 mmci_start_data(host, cmd->data); 626 } 627 } 628 629 static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain) 630 { 631 void __iomem *base = host->base; 632 char *ptr = buffer; 633 u32 status; 634 int host_remain = host->size; 635 636 do { 637 int count = host_remain - (readl(base + MMCIFIFOCNT) << 2); 638 639 if (count > remain) 640 count = remain; 641 642 if (count <= 0) 643 break; 644 645 readsl(base + MMCIFIFO, ptr, count >> 2); 646 647 ptr += count; 648 remain -= count; 649 host_remain -= count; 650 651 if (remain == 0) 652 break; 653 654 status = readl(base + MMCISTATUS); 655 } while (status & MCI_RXDATAAVLBL); 656 657 return ptr - buffer; 658 } 659 660 static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) 661 { 662 struct variant_data *variant = host->variant; 663 void __iomem *base = host->base; 664 char *ptr = buffer; 665 666 do { 667 unsigned int count, maxcnt; 668 669 maxcnt = status & MCI_TXFIFOEMPTY ? 
670 variant->fifosize : variant->fifohalfsize; 671 count = min(remain, maxcnt); 672 673 /* 674 * The ST Micro variant for SDIO transfer sizes 675 * less then 8 bytes should have clock H/W flow 676 * control disabled. 677 */ 678 if (variant->sdio && 679 mmc_card_sdio(host->mmc->card)) { 680 if (count < 8) 681 writel(readl(host->base + MMCICLOCK) & 682 ~variant->clkreg_enable, 683 host->base + MMCICLOCK); 684 else 685 writel(readl(host->base + MMCICLOCK) | 686 variant->clkreg_enable, 687 host->base + MMCICLOCK); 688 } 689 690 /* 691 * SDIO especially may want to send something that is 692 * not divisible by 4 (as opposed to card sectors 693 * etc), and the FIFO only accept full 32-bit writes. 694 * So compensate by adding +3 on the count, a single 695 * byte become a 32bit write, 7 bytes will be two 696 * 32bit writes etc. 697 */ 698 writesl(base + MMCIFIFO, ptr, (count + 3) >> 2); 699 700 ptr += count; 701 remain -= count; 702 703 if (remain == 0) 704 break; 705 706 status = readl(base + MMCISTATUS); 707 } while (status & MCI_TXFIFOHALFEMPTY); 708 709 return ptr - buffer; 710 } 711 712 /* 713 * PIO data transfer IRQ handler. 714 */ 715 static irqreturn_t mmci_pio_irq(int irq, void *dev_id) 716 { 717 struct mmci_host *host = dev_id; 718 struct sg_mapping_iter *sg_miter = &host->sg_miter; 719 struct variant_data *variant = host->variant; 720 void __iomem *base = host->base; 721 unsigned long flags; 722 u32 status; 723 724 status = readl(base + MMCISTATUS); 725 726 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); 727 728 local_irq_save(flags); 729 730 do { 731 unsigned int remain, len; 732 char *buffer; 733 734 /* 735 * For write, we only need to test the half-empty flag 736 * here - if the FIFO is completely empty, then by 737 * definition it is more than half empty. 738 * 739 * For read, check for data available. 
740 */ 741 if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL))) 742 break; 743 744 if (!sg_miter_next(sg_miter)) 745 break; 746 747 buffer = sg_miter->addr; 748 remain = sg_miter->length; 749 750 len = 0; 751 if (status & MCI_RXACTIVE) 752 len = mmci_pio_read(host, buffer, remain); 753 if (status & MCI_TXACTIVE) 754 len = mmci_pio_write(host, buffer, remain, status); 755 756 sg_miter->consumed = len; 757 758 host->size -= len; 759 remain -= len; 760 761 if (remain) 762 break; 763 764 status = readl(base + MMCISTATUS); 765 } while (1); 766 767 sg_miter_stop(sg_miter); 768 769 local_irq_restore(flags); 770 771 /* 772 * If we have less than the fifo 'half-full' threshold to transfer, 773 * trigger a PIO interrupt as soon as any data is available. 774 */ 775 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) 776 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); 777 778 /* 779 * If we run out of data, disable the data IRQs; this 780 * prevents a race where the FIFO becomes empty before 781 * the chip itself has disabled the data path, and 782 * stops us racing with our data end IRQ. 783 */ 784 if (host->size == 0) { 785 mmci_set_mask1(host, 0); 786 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0); 787 } 788 789 return IRQ_HANDLED; 790 } 791 792 /* 793 * Handle completion of command and data transfers. 
794 */ 795 static irqreturn_t mmci_irq(int irq, void *dev_id) 796 { 797 struct mmci_host *host = dev_id; 798 u32 status; 799 int ret = 0; 800 801 spin_lock(&host->lock); 802 803 do { 804 struct mmc_command *cmd; 805 struct mmc_data *data; 806 807 status = readl(host->base + MMCISTATUS); 808 809 if (host->singleirq) { 810 if (status & readl(host->base + MMCIMASK1)) 811 mmci_pio_irq(irq, dev_id); 812 813 status &= ~MCI_IRQ1MASK; 814 } 815 816 status &= readl(host->base + MMCIMASK0); 817 writel(status, host->base + MMCICLEAR); 818 819 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); 820 821 data = host->data; 822 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN| 823 MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data) 824 mmci_data_irq(host, data, status); 825 826 cmd = host->cmd; 827 if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd) 828 mmci_cmd_irq(host, cmd, status); 829 830 ret = 1; 831 } while (status); 832 833 spin_unlock(&host->lock); 834 835 return IRQ_RETVAL(ret); 836 } 837 838 static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) 839 { 840 struct mmci_host *host = mmc_priv(mmc); 841 unsigned long flags; 842 843 WARN_ON(host->mrq != NULL); 844 845 if (mrq->data && !is_power_of_2(mrq->data->blksz)) { 846 dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n", 847 mrq->data->blksz); 848 mrq->cmd->error = -EINVAL; 849 mmc_request_done(mmc, mrq); 850 return; 851 } 852 853 spin_lock_irqsave(&host->lock, flags); 854 855 host->mrq = mrq; 856 857 if (mrq->data && mrq->data->flags & MMC_DATA_READ) 858 mmci_start_data(host, mrq->data); 859 860 mmci_start_command(host, mrq->cmd, 0); 861 862 spin_unlock_irqrestore(&host->lock, flags); 863 } 864 865 static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 866 { 867 struct mmci_host *host = mmc_priv(mmc); 868 u32 pwr = 0; 869 unsigned long flags; 870 int ret; 871 872 switch (ios->power_mode) { 873 case MMC_POWER_OFF: 874 if 
(host->vcc) 875 ret = mmc_regulator_set_ocr(mmc, host->vcc, 0); 876 break; 877 case MMC_POWER_UP: 878 if (host->vcc) { 879 ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd); 880 if (ret) { 881 dev_err(mmc_dev(mmc), "unable to set OCR\n"); 882 /* 883 * The .set_ios() function in the mmc_host_ops 884 * struct return void, and failing to set the 885 * power should be rare so we print an error 886 * and return here. 887 */ 888 return; 889 } 890 } 891 if (host->plat->vdd_handler) 892 pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd, 893 ios->power_mode); 894 /* The ST version does not have this, fall through to POWER_ON */ 895 if (host->hw_designer != AMBA_VENDOR_ST) { 896 pwr |= MCI_PWR_UP; 897 break; 898 } 899 case MMC_POWER_ON: 900 pwr |= MCI_PWR_ON; 901 break; 902 } 903 904 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { 905 if (host->hw_designer != AMBA_VENDOR_ST) 906 pwr |= MCI_ROD; 907 else { 908 /* 909 * The ST Micro variant use the ROD bit for something 910 * else and only has OD (Open Drain). 
911 */ 912 pwr |= MCI_OD; 913 } 914 } 915 916 spin_lock_irqsave(&host->lock, flags); 917 918 mmci_set_clkreg(host, ios->clock); 919 920 if (host->pwr != pwr) { 921 host->pwr = pwr; 922 writel(pwr, host->base + MMCIPOWER); 923 } 924 925 spin_unlock_irqrestore(&host->lock, flags); 926 } 927 928 static int mmci_get_ro(struct mmc_host *mmc) 929 { 930 struct mmci_host *host = mmc_priv(mmc); 931 932 if (host->gpio_wp == -ENOSYS) 933 return -ENOSYS; 934 935 return gpio_get_value_cansleep(host->gpio_wp); 936 } 937 938 static int mmci_get_cd(struct mmc_host *mmc) 939 { 940 struct mmci_host *host = mmc_priv(mmc); 941 struct mmci_platform_data *plat = host->plat; 942 unsigned int status; 943 944 if (host->gpio_cd == -ENOSYS) { 945 if (!plat->status) 946 return 1; /* Assume always present */ 947 948 status = plat->status(mmc_dev(host->mmc)); 949 } else 950 status = !!gpio_get_value_cansleep(host->gpio_cd) 951 ^ plat->cd_invert; 952 953 /* 954 * Use positive logic throughout - status is zero for no card, 955 * non-zero for card inserted. 
956 */ 957 return status; 958 } 959 960 static irqreturn_t mmci_cd_irq(int irq, void *dev_id) 961 { 962 struct mmci_host *host = dev_id; 963 964 mmc_detect_change(host->mmc, msecs_to_jiffies(500)); 965 966 return IRQ_HANDLED; 967 } 968 969 static const struct mmc_host_ops mmci_ops = { 970 .request = mmci_request, 971 .set_ios = mmci_set_ios, 972 .get_ro = mmci_get_ro, 973 .get_cd = mmci_get_cd, 974 }; 975 976 static int __devinit mmci_probe(struct amba_device *dev, 977 const struct amba_id *id) 978 { 979 struct mmci_platform_data *plat = dev->dev.platform_data; 980 struct variant_data *variant = id->data; 981 struct mmci_host *host; 982 struct mmc_host *mmc; 983 int ret; 984 985 /* must have platform data */ 986 if (!plat) { 987 ret = -EINVAL; 988 goto out; 989 } 990 991 ret = amba_request_regions(dev, DRIVER_NAME); 992 if (ret) 993 goto out; 994 995 mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); 996 if (!mmc) { 997 ret = -ENOMEM; 998 goto rel_regions; 999 } 1000 1001 host = mmc_priv(mmc); 1002 host->mmc = mmc; 1003 1004 host->gpio_wp = -ENOSYS; 1005 host->gpio_cd = -ENOSYS; 1006 host->gpio_cd_irq = -1; 1007 1008 host->hw_designer = amba_manf(dev); 1009 host->hw_revision = amba_rev(dev); 1010 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer); 1011 dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision); 1012 1013 host->clk = clk_get(&dev->dev, NULL); 1014 if (IS_ERR(host->clk)) { 1015 ret = PTR_ERR(host->clk); 1016 host->clk = NULL; 1017 goto host_free; 1018 } 1019 1020 ret = clk_enable(host->clk); 1021 if (ret) 1022 goto clk_free; 1023 1024 host->plat = plat; 1025 host->variant = variant; 1026 host->mclk = clk_get_rate(host->clk); 1027 /* 1028 * According to the spec, mclk is max 100 MHz, 1029 * so we try to adjust the clock down to this, 1030 * (if possible). 
1031 */ 1032 if (host->mclk > 100000000) { 1033 ret = clk_set_rate(host->clk, 100000000); 1034 if (ret < 0) 1035 goto clk_disable; 1036 host->mclk = clk_get_rate(host->clk); 1037 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", 1038 host->mclk); 1039 } 1040 host->phybase = dev->res.start; 1041 host->base = ioremap(dev->res.start, resource_size(&dev->res)); 1042 if (!host->base) { 1043 ret = -ENOMEM; 1044 goto clk_disable; 1045 } 1046 1047 mmc->ops = &mmci_ops; 1048 mmc->f_min = (host->mclk + 511) / 512; 1049 /* 1050 * If the platform data supplies a maximum operating 1051 * frequency, this takes precedence. Else, we fall back 1052 * to using the module parameter, which has a (low) 1053 * default value in case it is not specified. Either 1054 * value must not exceed the clock rate into the block, 1055 * of course. 1056 */ 1057 if (plat->f_max) 1058 mmc->f_max = min(host->mclk, plat->f_max); 1059 else 1060 mmc->f_max = min(host->mclk, fmax); 1061 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); 1062 1063 #ifdef CONFIG_REGULATOR 1064 /* If we're using the regulator framework, try to fetch a regulator */ 1065 host->vcc = regulator_get(&dev->dev, "vmmc"); 1066 if (IS_ERR(host->vcc)) 1067 host->vcc = NULL; 1068 else { 1069 int mask = mmc_regulator_get_ocrmask(host->vcc); 1070 1071 if (mask < 0) 1072 dev_err(&dev->dev, "error getting OCR mask (%d)\n", 1073 mask); 1074 else { 1075 host->mmc->ocr_avail = (u32) mask; 1076 if (plat->ocr_mask) 1077 dev_warn(&dev->dev, 1078 "Provided ocr_mask/setpower will not be used " 1079 "(using regulator instead)\n"); 1080 } 1081 } 1082 #endif 1083 /* Fall back to platform data if no regulator is found */ 1084 if (host->vcc == NULL) 1085 mmc->ocr_avail = plat->ocr_mask; 1086 mmc->caps = plat->capabilities; 1087 1088 /* 1089 * We can do SGIO 1090 */ 1091 mmc->max_segs = NR_SG; 1092 1093 /* 1094 * Since only a certain number of bits are valid in the data length 1095 * register, we must ensure that we don't exceed 2^num-1 
bytes in a
 * single request.
 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	/* Mask and clear all controller interrupts before requesting IRQs. */
	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	/*
	 * Optional card-detect GPIO.  A missing GPIO (-ENOSYS) is not an
	 * error; any other failure unwinds through err_gpio_cd.
	 */
	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * Failing to get a card-detect interrupt is not fatal:
		 * host->gpio_cd_irq then stays negative and the
		 * MMC_CAP_NEEDS_POLL fallback below kicks in.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq, 0,
					      DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	/* Optional write-protect GPIO, same -ENOSYS convention as above. */
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	/*
	 * If card status can be read (platform callback or CD GPIO) but no
	 * card-detect interrupt is available, fall back to polling.
	 */
	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	/*
	 * Some cells only provide one IRQ line; in that case the command
	 * handler also services the PIO FIFO events (host->singleirq).
	 */
	if (dev->irq[1] == NO_IRQ)
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	/* All handlers are in place: unmask the interrupts we service. */
	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	mmc_add_host(mmc);

	return 0;

	/*
	 * Error unwinding: each label releases the resources acquired after
	 * the previous label's acquisition point.  The earlier labels
	 * (clk_disable .. out) are jumped to from code above this chunk.
	 */
 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

/*
 * Tear the host down in roughly the reverse order of probe: unregister
 * from the MMC core, mask and quiesce the controller, then release DMA,
 * IRQs, GPIOs, the register mapping, clock and regulator.
 */
static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		mmc_remove_host(mmc);

		/* Mask all interrupts and stop any command/data activity. */
		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		/* The PIO IRQ was only requested when two lines exist. */
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		/* Power down the card regulator, if one was acquired. */
		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
/*
 * Suspend the MMC core first; only mask the controller interrupts if
 * that succeeded, so a failed suspend leaves the host fully operational.
 */
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

/* Re-arm the interrupt mask before handing control back to the MMC core. */
static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

/*
 * AMBA peripheral IDs this driver binds to; .data selects the
 * variant_data quirk set for each cell revision.
 */
static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0x00ffffff,
		.data	= &variant_ux500,
	},
	{ 0, 0 },	/* sentinel terminating the table */
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,	/* NULL when !CONFIG_PM */
	.resume		= mmci_resume,	/* NULL when !CONFIG_PM */
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
/* Maximum bus frequency cap; read-only (0444) via module sysfs. */
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");