/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

/*
 * Default maximum operating frequency in Hz; may be overridden by the
 * "fmax" module parameter (see module_param() at the bottom of this file)
 * or by platform data (plat->f_max) in mmci_probe().
 */
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
};
/* Quirk tables for the supported primecell variants (matched via amba_id) */
static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
};

/*
 * Program the MMCICLOCK register for the @desired bus frequency (Hz),
 * updating host->cclk with the frequency actually achieved.  A @desired
 * of 0 leaves the clock disabled (only the bus-width bits are set).
 *
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			/* Can't divide down far enough; bypass the divider */
			clk = MCI_CLK_BYPASS;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	writel(clk, host->base + MMCICLOCK);
}

/*
 * Complete @mrq and hand it back to the MMC core.  Called with
 * host->lock held; the lock is dropped across mmc_request_done().
 */
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

/*
 * Set the PIO interrupt mask.  On single-IRQ hardware the MASK1 bits
 * are mirrored into MASK0 so everything arrives on irq[0].
 */
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

/* Disable the data path and forget the current data transfer. */
static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

/*
 * Start a scatterlist iteration for PIO.  SG_MITER_ATOMIC is required
 * because the iterator is advanced from (hard)irq context in
 * mmci_pio_irq().
 */
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
/*
 * Acquire the DMA slave channels described by the platform data and
 * clamp the host's max segment size to what the engine can handle.
 * DMA is strictly optional: on any failure the driver simply keeps
 * running in PIO mode.
 */
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it is
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		/* Reuse the RX channel for TX (see comment above) */
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}

/*
 * This is used in __devinit or __devexit so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	/* Only release TX separately if it wasn't the shared RX channel */
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

/*
 * Tear down the DMA mapping for a finished (or failed) data transfer,
 * after first draining any residue left in the FIFO.
 */
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);
		if (!data->error)
			data->error = -EIO;
	}

	if (data->flags & MMC_DATA_WRITE) {
		dir = DMA_TO_DEVICE;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}
}

/* Abort the in-flight DMA descriptor after a data error interrupt. */
static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
}

/*
 * Map host->data and kick off a slave-DMA transfer.  Returns 0 on
 * success; any negative error means the caller must fall back to PIO.
 */
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
	};
	struct mmc_data *data = host->data;
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;

	host->dma_current = NULL;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (host->size <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	/* Okay, go for it. */
	host->dma_current = chan;

	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;

unmap_exit:
	dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
	return -ENOMEM;
}
#else
/* Blank functions if the DMA engine is not available */
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}
#endif

/*
 * Program the data path (timer, length, DATACTRL) for @data and start
 * the transfer, preferring DMA and falling back to interrupt-driven PIO.
 * Called with host->lock held.
 */
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks,
data->flags); 449 450 host->data = data; 451 host->size = data->blksz * data->blocks; 452 data->bytes_xfered = 0; 453 454 clks = (unsigned long long)data->timeout_ns * host->cclk; 455 do_div(clks, 1000000000UL); 456 457 timeout = data->timeout_clks + (unsigned int)clks; 458 459 base = host->base; 460 writel(timeout, base + MMCIDATATIMER); 461 writel(host->size, base + MMCIDATALENGTH); 462 463 blksz_bits = ffs(data->blksz) - 1; 464 BUG_ON(1 << blksz_bits != data->blksz); 465 466 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; 467 468 if (data->flags & MMC_DATA_READ) 469 datactrl |= MCI_DPSM_DIRECTION; 470 471 /* 472 * Attempt to use DMA operation mode, if this 473 * should fail, fall back to PIO mode 474 */ 475 if (!mmci_dma_start_data(host, datactrl)) 476 return; 477 478 /* IRQ mode, map the SG list for CPU reading/writing */ 479 mmci_init_sg(host, data); 480 481 if (data->flags & MMC_DATA_READ) { 482 irqmask = MCI_RXFIFOHALFFULLMASK; 483 484 /* 485 * If we have less than the fifo 'half-full' threshold to 486 * transfer, trigger a PIO interrupt as soon as any data 487 * is available. 488 */ 489 if (host->size < variant->fifohalfsize) 490 irqmask |= MCI_RXDATAAVLBLMASK; 491 } else { 492 /* 493 * We don't actually need to include "FIFO empty" here 494 * since its implicit in "FIFO half empty". 
495 */ 496 irqmask = MCI_TXFIFOHALFEMPTYMASK; 497 } 498 499 /* The ST Micro variants has a special bit to enable SDIO */ 500 if (variant->sdio && host->mmc->card) 501 if (mmc_card_sdio(host->mmc->card)) 502 datactrl |= MCI_ST_DPSM_SDIOEN; 503 504 writel(datactrl, base + MMCIDATACTRL); 505 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); 506 mmci_set_mask1(host, irqmask); 507 } 508 509 static void 510 mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) 511 { 512 void __iomem *base = host->base; 513 514 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", 515 cmd->opcode, cmd->arg, cmd->flags); 516 517 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { 518 writel(0, base + MMCICOMMAND); 519 udelay(1); 520 } 521 522 c |= cmd->opcode | MCI_CPSM_ENABLE; 523 if (cmd->flags & MMC_RSP_PRESENT) { 524 if (cmd->flags & MMC_RSP_136) 525 c |= MCI_CPSM_LONGRSP; 526 c |= MCI_CPSM_RESPONSE; 527 } 528 if (/*interrupt*/0) 529 c |= MCI_CPSM_INTERRUPT; 530 531 host->cmd = cmd; 532 533 writel(cmd->arg, base + MMCIARGUMENT); 534 writel(c, base + MMCICOMMAND); 535 } 536 537 static void 538 mmci_data_irq(struct mmci_host *host, struct mmc_data *data, 539 unsigned int status) 540 { 541 /* First check for errors */ 542 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { 543 u32 remain, success; 544 545 /* Terminate the DMA transfer */ 546 if (dma_inprogress(host)) 547 mmci_dma_data_error(host); 548 549 /* 550 * Calculate how far we are into the transfer. Note that 551 * the data counter gives the number of bytes transferred 552 * on the MMC bus, not on the host side. On reads, this 553 * can be as much as a FIFO-worth of data ahead. This 554 * matters for FIFO overruns only. 
555 */ 556 remain = readl(host->base + MMCIDATACNT); 557 success = data->blksz * data->blocks - remain; 558 559 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", 560 status, success); 561 if (status & MCI_DATACRCFAIL) { 562 /* Last block was not successful */ 563 success -= 1; 564 data->error = -EILSEQ; 565 } else if (status & MCI_DATATIMEOUT) { 566 data->error = -ETIMEDOUT; 567 } else if (status & MCI_TXUNDERRUN) { 568 data->error = -EIO; 569 } else if (status & MCI_RXOVERRUN) { 570 if (success > host->variant->fifosize) 571 success -= host->variant->fifosize; 572 else 573 success = 0; 574 data->error = -EIO; 575 } 576 data->bytes_xfered = round_down(success, data->blksz); 577 } 578 579 if (status & MCI_DATABLOCKEND) 580 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); 581 582 if (status & MCI_DATAEND || data->error) { 583 if (dma_inprogress(host)) 584 mmci_dma_unmap(host, data); 585 mmci_stop_data(host); 586 587 if (!data->error) 588 /* The error clause is handled above, success! 
*/ 589 data->bytes_xfered = data->blksz * data->blocks; 590 591 if (!data->stop) { 592 mmci_request_end(host, data->mrq); 593 } else { 594 mmci_start_command(host, data->stop, 0); 595 } 596 } 597 } 598 599 static void 600 mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, 601 unsigned int status) 602 { 603 void __iomem *base = host->base; 604 605 host->cmd = NULL; 606 607 if (status & MCI_CMDTIMEOUT) { 608 cmd->error = -ETIMEDOUT; 609 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) { 610 cmd->error = -EILSEQ; 611 } else { 612 cmd->resp[0] = readl(base + MMCIRESPONSE0); 613 cmd->resp[1] = readl(base + MMCIRESPONSE1); 614 cmd->resp[2] = readl(base + MMCIRESPONSE2); 615 cmd->resp[3] = readl(base + MMCIRESPONSE3); 616 } 617 618 if (!cmd->data || cmd->error) { 619 if (host->data) 620 mmci_stop_data(host); 621 mmci_request_end(host, cmd->mrq); 622 } else if (!(cmd->data->flags & MMC_DATA_READ)) { 623 mmci_start_data(host, cmd->data); 624 } 625 } 626 627 static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain) 628 { 629 void __iomem *base = host->base; 630 char *ptr = buffer; 631 u32 status; 632 int host_remain = host->size; 633 634 do { 635 int count = host_remain - (readl(base + MMCIFIFOCNT) << 2); 636 637 if (count > remain) 638 count = remain; 639 640 if (count <= 0) 641 break; 642 643 readsl(base + MMCIFIFO, ptr, count >> 2); 644 645 ptr += count; 646 remain -= count; 647 host_remain -= count; 648 649 if (remain == 0) 650 break; 651 652 status = readl(base + MMCISTATUS); 653 } while (status & MCI_RXDATAAVLBL); 654 655 return ptr - buffer; 656 } 657 658 static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) 659 { 660 struct variant_data *variant = host->variant; 661 void __iomem *base = host->base; 662 char *ptr = buffer; 663 664 do { 665 unsigned int count, maxcnt; 666 667 maxcnt = status & MCI_TXFIFOEMPTY ? 
668 variant->fifosize : variant->fifohalfsize; 669 count = min(remain, maxcnt); 670 671 /* 672 * The ST Micro variant for SDIO transfer sizes 673 * less then 8 bytes should have clock H/W flow 674 * control disabled. 675 */ 676 if (variant->sdio && 677 mmc_card_sdio(host->mmc->card)) { 678 if (count < 8) 679 writel(readl(host->base + MMCICLOCK) & 680 ~variant->clkreg_enable, 681 host->base + MMCICLOCK); 682 else 683 writel(readl(host->base + MMCICLOCK) | 684 variant->clkreg_enable, 685 host->base + MMCICLOCK); 686 } 687 688 /* 689 * SDIO especially may want to send something that is 690 * not divisible by 4 (as opposed to card sectors 691 * etc), and the FIFO only accept full 32-bit writes. 692 * So compensate by adding +3 on the count, a single 693 * byte become a 32bit write, 7 bytes will be two 694 * 32bit writes etc. 695 */ 696 writesl(base + MMCIFIFO, ptr, (count + 3) >> 2); 697 698 ptr += count; 699 remain -= count; 700 701 if (remain == 0) 702 break; 703 704 status = readl(base + MMCISTATUS); 705 } while (status & MCI_TXFIFOHALFEMPTY); 706 707 return ptr - buffer; 708 } 709 710 /* 711 * PIO data transfer IRQ handler. 712 */ 713 static irqreturn_t mmci_pio_irq(int irq, void *dev_id) 714 { 715 struct mmci_host *host = dev_id; 716 struct sg_mapping_iter *sg_miter = &host->sg_miter; 717 struct variant_data *variant = host->variant; 718 void __iomem *base = host->base; 719 unsigned long flags; 720 u32 status; 721 722 status = readl(base + MMCISTATUS); 723 724 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); 725 726 local_irq_save(flags); 727 728 do { 729 unsigned int remain, len; 730 char *buffer; 731 732 /* 733 * For write, we only need to test the half-empty flag 734 * here - if the FIFO is completely empty, then by 735 * definition it is more than half empty. 736 * 737 * For read, check for data available. 
738 */ 739 if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL))) 740 break; 741 742 if (!sg_miter_next(sg_miter)) 743 break; 744 745 buffer = sg_miter->addr; 746 remain = sg_miter->length; 747 748 len = 0; 749 if (status & MCI_RXACTIVE) 750 len = mmci_pio_read(host, buffer, remain); 751 if (status & MCI_TXACTIVE) 752 len = mmci_pio_write(host, buffer, remain, status); 753 754 sg_miter->consumed = len; 755 756 host->size -= len; 757 remain -= len; 758 759 if (remain) 760 break; 761 762 status = readl(base + MMCISTATUS); 763 } while (1); 764 765 sg_miter_stop(sg_miter); 766 767 local_irq_restore(flags); 768 769 /* 770 * If we have less than the fifo 'half-full' threshold to transfer, 771 * trigger a PIO interrupt as soon as any data is available. 772 */ 773 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) 774 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); 775 776 /* 777 * If we run out of data, disable the data IRQs; this 778 * prevents a race where the FIFO becomes empty before 779 * the chip itself has disabled the data path, and 780 * stops us racing with our data end IRQ. 781 */ 782 if (host->size == 0) { 783 mmci_set_mask1(host, 0); 784 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0); 785 } 786 787 return IRQ_HANDLED; 788 } 789 790 /* 791 * Handle completion of command and data transfers. 
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			/* MASK1 (PIO) events are routed here too; fan out */
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

/*
 * mmc_host_ops .request: validate the block size, then start the
 * transfer; data reads are programmed before the command so the data
 * path is armed when the card starts sending.
 */
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	/* The hardware block size field only encodes powers of two */
	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * mmc_host_ops .set_ios: apply power mode (via regulator and/or the
 * MMCIPOWER register), bus mode and clock settings requested by the core.
 */
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct return void, and failing to set the
				 * power should be rare so we print an error
				 * and return here.
				 */
				return;
			}
		}
		if (host->plat->vdd_handler)
			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
						       ios->power_mode);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant use the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	/* Only touch the power register when something changed */
	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * mmc_host_ops .get_ro: read the write-protect GPIO, or -ENOSYS if the
 * platform provides none.
 */
static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}

/*
 * mmc_host_ops .get_cd: card-detect state from GPIO or the platform
 * status callback; assumes present when neither is available.
 */
static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}

/* Card-detect GPIO interrupt: debounce via delayed rescan */
static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

/*
 * Probe an MMCI primecell: allocate the mmc_host, set up clocks,
 * regulator, host capabilities, GPIOs and IRQs, then register with the
 * MMC core.  The goto ladder unwinds each acquired resource in reverse
 * order on failure.
 */
static int __devinit mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	mmc->f_min = (host->mclk + 511) / 512;
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	/* Mask and clear everything before IRQs are requested */
	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/* CD IRQ is best-effort; without it we fall back to polling */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq, 0,
					      DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (dev->irq[1] == NO_IRQ)
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	mmc_add_host(mmc);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

/* Unwind everything mmci_probe() set up, in reverse order */
static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		mmc_remove_host(mmc);

		/* Quiesce the hardware before tearing down IRQs */
		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
/* Legacy bus suspend: mask all interrupts once the core has suspended */
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

/* Legacy bus resume: re-enable interrupts and resume the MMC core */
static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

/* Peripheral ID match table mapping each primecell to its quirk data */
static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id     = 0x00180180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00280180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00480180,
		.mask   = 0x00ffffff,
		.data	= &variant_ux500,
	},
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

/* Module metadata */
MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");