/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *            is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *                is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size is at bits 16..30 of the
 *                    datactrl register
 */
struct variant_data {
        unsigned int            clkreg;
        unsigned int            clkreg_enable;
        unsigned int            datalength_bits;
        unsigned int            fifosize;
        unsigned int            fifohalfsize;
        bool                    sdio;
        bool                    st_clkdiv;
        bool                    blksz_datactrl16;
};

static struct variant_data variant_arm = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
        .datalength_bits        = 16,
};

static struct variant_data variant_arm_extended_fifo = {
        .fifosize               = 128 * 4,
        .fifohalfsize           = 64 * 4,
        .datalength_bits        = 16,
};

static struct variant_data variant_u300 = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg_enable          = MCI_ST_U300_HWFCEN,
        .datalength_bits        = 16,
        .sdio                   = true,
};

static struct variant_data variant_ux500 = {
        .fifosize               = 30 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_enable          = MCI_ST_UX500_HWFCEN,
        .datalength_bits        = 24,
        .sdio                   = true,
        .st_clkdiv              = true,
};

static struct variant_data variant_ux500v2 = {
        .fifosize               = 30 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_enable          = MCI_ST_UX500_HWFCEN,
        .datalength_bits        = 24,
        .sdio                   = true,
        .st_clkdiv              = true,
        .blksz_datactrl16       = true,
};

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
        struct variant_data *variant = host->variant;
        u32 clk = variant->clkreg;

        if (desired) {
                if (desired >= host->mclk) {
                        clk = MCI_CLK_BYPASS;
                        if (variant->st_clkdiv)
                                clk |= MCI_ST_UX500_NEG_EDGE;
                        host->cclk = host->mclk;
                } else if (variant->st_clkdiv) {
                        /*
                         * DB8500 TRM says f = mclk / (clkdiv + 2)
                         * => clkdiv = (mclk / f) - 2
                         * Round the divider up so we don't exceed the max
                         * frequency
                         */
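                        /*
                         * Worked example with assumed numbers: mclk =
                         * 100 MHz and desired = 400 kHz gives
                         * DIV_ROUND_UP(100000000, 400000) - 2 = 248, so
                         * cclk = 100000000 / (248 + 2) = 400000 Hz.
                         * Rounding the division up means cclk never
                         * exceeds the desired rate.
                         */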
                        clk = DIV_ROUND_UP(host->mclk, desired) - 2;
                        if (clk >= 256)
                                clk = 255;
                        host->cclk = host->mclk / (clk + 2);
                } else {
                        /*
                         * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
                         * => clkdiv = mclk / (2 * f) - 1
                         */
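                        /*
                         * Illustrative numbers: mclk = 50 MHz and a
                         * desired rate of 625 kHz give
                         * clkdiv = 50000000 / 1250000 - 1 = 39, so
                         * cclk = 50000000 / (2 * (39 + 1)) = 625 kHz.
                         */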
                        clk = host->mclk / (2 * desired) - 1;
                        if (clk >= 256)
                                clk = 255;
                        host->cclk = host->mclk / (2 * (clk + 1));
                }

                clk |= variant->clkreg_enable;
                clk |= MCI_CLK_ENABLE;
                /* This hasn't proven to be worthwhile */
                /* clk |= MCI_CLK_PWRSAVE; */
        }

        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
                clk |= MCI_4BIT_BUS;
        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
                clk |= MCI_ST_8BIT_BUS;

        writel(clk, host->base + MMCICLOCK);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
        writel(0, host->base + MMCICOMMAND);

        BUG_ON(host->data);

        host->mrq = NULL;
        host->cmd = NULL;

        /*
         * Need to drop the host lock here; mmc_request_done may call
         * back into the driver...
         */
        spin_unlock(&host->lock);
        mmc_request_done(host->mmc, mrq);
        spin_lock(&host->lock);
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
        void __iomem *base = host->base;

        if (host->singleirq) {
                unsigned int mask0 = readl(base + MMCIMASK0);

                mask0 &= ~MCI_IRQ1MASK;
                mask0 |= mask;

                writel(mask0, base + MMCIMASK0);
        }

        writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
        writel(0, host->base + MMCIDATACTRL);
        mmci_set_mask1(host, 0);
        host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
        unsigned int flags = SG_MITER_ATOMIC;

        if (data->flags & MMC_DATA_READ)
                flags |= SG_MITER_TO_SG;
        else
                flags |= SG_MITER_FROM_SG;

        sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
        struct mmci_platform_data *plat = host->plat;
        const char *rxname, *txname;
        dma_cap_mask_t mask;

        if (!plat || !plat->dma_filter) {
                dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
                return;
        }

        /* Try to acquire a generic DMA engine slave channel */
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /*
         * If only an RX channel is specified, the driver will
         * attempt to use it bidirectionally, however if it
         * is specified but cannot be located, DMA will be disabled.
         */
        if (plat->dma_rx_param) {
                host->dma_rx_channel = dma_request_channel(mask,
                                                           plat->dma_filter,
                                                           plat->dma_rx_param);
                /* E.g. if no DMA hardware is present */
                if (!host->dma_rx_channel)
                        dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
        }

        if (plat->dma_tx_param) {
                host->dma_tx_channel = dma_request_channel(mask,
                                                           plat->dma_filter,
                                                           plat->dma_tx_param);
                if (!host->dma_tx_channel)
                        dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
        } else {
                host->dma_tx_channel = host->dma_rx_channel;
        }

        if (host->dma_rx_channel)
                rxname = dma_chan_name(host->dma_rx_channel);
        else
                rxname = "none";

        if (host->dma_tx_channel)
                txname = dma_chan_name(host->dma_tx_channel);
        else
                txname = "none";

        dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
                 rxname, txname);

        /*
         * Limit the maximum segment size in any SG entry according to
         * the parameters of the DMA engine device.
         */
        if (host->dma_tx_channel) {
                struct device *dev = host->dma_tx_channel->device->dev;
                unsigned int max_seg_size = dma_get_max_seg_size(dev);

                if (max_seg_size < host->mmc->max_seg_size)
                        host->mmc->max_seg_size = max_seg_size;
        }
        if (host->dma_rx_channel) {
                struct device *dev = host->dma_rx_channel->device->dev;
                unsigned int max_seg_size = dma_get_max_seg_size(dev);

                if (max_seg_size < host->mmc->max_seg_size)
                        host->mmc->max_seg_size = max_seg_size;
        }
}

/*
 * This is used in __devinit or __devexit code, so inline it so that it
 * can be discarded along with its callers.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
        struct mmci_platform_data *plat = host->plat;

        if (host->dma_rx_channel)
                dma_release_channel(host->dma_rx_channel);
        if (host->dma_tx_channel && plat->dma_tx_param)
                dma_release_channel(host->dma_tx_channel);
        host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
        struct dma_chan *chan = host->dma_current;
        enum dma_data_direction dir;
        u32 status;
        int i;

        /* Wait up to 1ms for the DMA to complete */
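        /*
         * (An assumed budget of 100 polls of udelay(10); the loop exits
         * early as soon as the FIFO has drained.)
         */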
        for (i = 0; ; i++) {
                status = readl(host->base + MMCISTATUS);
                if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
                        break;
                udelay(10);
        }

        /*
         * Check to see whether we still have some data left in the FIFO -
         * this catches DMA controllers which are unable to monitor the
         * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
         * contiguous buffers.  On TX, we'll get a FIFO underrun error.
         */
        if (status & MCI_RXDATAAVLBLMASK) {
                dmaengine_terminate_all(chan);
                if (!data->error)
                        data->error = -EIO;
        }

        if (data->flags & MMC_DATA_WRITE)
                dir = DMA_TO_DEVICE;
        else
                dir = DMA_FROM_DEVICE;

        dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

        /*
         * Use of DMA with scatter-gather is impossible.
         * Give up with DMA and switch back to PIO mode.
         */
        if (status & MCI_RXDATAAVLBLMASK) {
                dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
                mmci_dma_release(host);
        }
}

static void mmci_dma_data_error(struct mmci_host *host)
{
        dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
        dmaengine_terminate_all(host->dma_current);
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
        struct variant_data *variant = host->variant;
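        /*
         * The maxburst fields below are in 32-bit words: e.g. the ux500
         * fifohalfsize of 8 * 4 bytes becomes a burst of 8 words
         * (illustrative arithmetic based on the variant tables above).
         */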
        struct dma_slave_config conf = {
                .src_addr = host->phybase + MMCIFIFO,
                .dst_addr = host->phybase + MMCIFIFO,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
                .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
        };
        struct mmc_data *data = host->data;
        struct dma_chan *chan;
        struct dma_device *device;
        struct dma_async_tx_descriptor *desc;
        int nr_sg;

        host->dma_current = NULL;

        if (data->flags & MMC_DATA_READ) {
                conf.direction = DMA_FROM_DEVICE;
                chan = host->dma_rx_channel;
        } else {
                conf.direction = DMA_TO_DEVICE;
                chan = host->dma_tx_channel;
        }

        /* If there's no DMA channel, fall back to PIO */
        if (!chan)
                return -EINVAL;

        /* If less than or equal to the fifo size, don't bother with DMA */
        if (host->size <= variant->fifosize)
                return -EINVAL;

        device = chan->device;
        nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
        if (nr_sg == 0)
                return -EINVAL;

        dmaengine_slave_config(chan, &conf);
        desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
                                            conf.direction, DMA_CTRL_ACK);
        if (!desc)
                goto unmap_exit;

        /* Okay, go for it. */
        host->dma_current = chan;

        dev_vdbg(mmc_dev(host->mmc),
                 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
                 data->sg_len, data->blksz, data->blocks, data->flags);
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);

        datactrl |= MCI_DPSM_DMAENABLE;

        /* Trigger the DMA transfer */
        writel(datactrl, host->base + MMCIDATACTRL);

        /*
         * Let the MMCI say when the data is ended and it's time
         * to fire the next DMA request. When that happens, the data
         * end interrupt is handled in mmci_data_irq().
         */
        writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
               host->base + MMCIMASK0);
        return 0;

unmap_exit:
        dmaengine_terminate_all(chan);
        dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
        return -ENOMEM;
}
#else
/* Blank functions if the DMA engine is not available */
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
        return -ENOSYS;
}
#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
        struct variant_data *variant = host->variant;
        unsigned int datactrl, timeout, irqmask;
        unsigned long long clks;
        void __iomem *base;
        int blksz_bits;

        dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
                data->blksz, data->blocks, data->flags);

        host->data = data;
        host->size = data->blksz * data->blocks;
        data->bytes_xfered = 0;

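        /*
         * Convert the nanosecond timeout to card clock cycles. As an
         * assumed example, timeout_ns = 100000000 (100 ms) at a cclk of
         * 26 MHz gives 2600000 cycles, on top of any timeout_clks.
         */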
        clks = (unsigned long long)data->timeout_ns * host->cclk;
        do_div(clks, 1000000000UL);

        timeout = data->timeout_clks + (unsigned int)clks;

        base = host->base;
        writel(timeout, base + MMCIDATATIMER);
        writel(host->size, base + MMCIDATALENGTH);

        blksz_bits = ffs(data->blksz) - 1;
        BUG_ON(1 << blksz_bits != data->blksz);

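        /*
         * Example encoding: a 512-byte block gives blksz_bits = 9,
         * which is placed at bits 4..7 of datactrl; variants with
         * blksz_datactrl16 instead take the raw byte count at bits
         * 16..30, i.e. 512 << 16.
         */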
        if (variant->blksz_datactrl16)
                datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
        else
                datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

        if (data->flags & MMC_DATA_READ)
                datactrl |= MCI_DPSM_DIRECTION;

        /*
         * Attempt to use DMA operation mode; if this
         * should fail, fall back to PIO mode
         */
        if (!mmci_dma_start_data(host, datactrl))
                return;

        /* IRQ mode, map the SG list for CPU reading/writing */
        mmci_init_sg(host, data);

        if (data->flags & MMC_DATA_READ) {
                irqmask = MCI_RXFIFOHALFFULLMASK;

                /*
                 * If we have less than the fifo 'half-full' threshold to
                 * transfer, trigger a PIO interrupt as soon as any data
                 * is available.
                 */
                if (host->size < variant->fifohalfsize)
                        irqmask |= MCI_RXDATAAVLBLMASK;
        } else {
                /*
                 * We don't actually need to include "FIFO empty" here
                 * since it's implicit in "FIFO half empty".
                 */
                irqmask = MCI_TXFIFOHALFEMPTYMASK;
        }

        /* The ST Micro variants have a special bit to enable SDIO */
        if (variant->sdio && host->mmc->card)
                if (mmc_card_sdio(host->mmc->card))
                        datactrl |= MCI_ST_DPSM_SDIOEN;

        writel(datactrl, base + MMCIDATACTRL);
        writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
        mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
        void __iomem *base = host->base;

        dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
                cmd->opcode, cmd->arg, cmd->flags);

        if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
                writel(0, base + MMCICOMMAND);
                udelay(1);
        }

        c |= cmd->opcode | MCI_CPSM_ENABLE;
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136)
                        c |= MCI_CPSM_LONGRSP;
                c |= MCI_CPSM_RESPONSE;
        }
        if (/*interrupt*/0)
                c |= MCI_CPSM_INTERRUPT;

        host->cmd = cmd;

        writel(cmd->arg, base + MMCIARGUMENT);
        writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
              unsigned int status)
{
        /* First check for errors */
        if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
                u32 remain, success;

                /* Terminate the DMA transfer */
                if (dma_inprogress(host))
                        mmci_dma_data_error(host);

                /*
                 * Calculate how far we are into the transfer. Note that
                 * the data counter gives the number of bytes transferred
                 * on the MMC bus, not on the host side. On reads, this
                 * can be as much as a FIFO-worth of data ahead. This
                 * matters for FIFO overruns only.
                 */
                remain = readl(host->base + MMCIDATACNT);
                success = data->blksz * data->blocks - remain;

                dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
                        status, success);
                if (status & MCI_DATACRCFAIL) {
                        /* Last block was not successful */
                        success -= 1;
                        data->error = -EILSEQ;
                } else if (status & MCI_DATATIMEOUT) {
                        data->error = -ETIMEDOUT;
                } else if (status & MCI_TXUNDERRUN) {
                        data->error = -EIO;
                } else if (status & MCI_RXOVERRUN) {
                        if (success > host->variant->fifosize)
                                success -= host->variant->fifosize;
                        else
                                success = 0;
                        data->error = -EIO;
                }
                data->bytes_xfered = round_down(success, data->blksz);
        }

        if (status & MCI_DATABLOCKEND)
                dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

        if (status & MCI_DATAEND || data->error) {
                if (dma_inprogress(host))
                        mmci_dma_unmap(host, data);
                mmci_stop_data(host);

                if (!data->error)
                        /* The error clause is handled above, success! */
                        data->bytes_xfered = data->blksz * data->blocks;

                if (!data->stop) {
                        mmci_request_end(host, data->mrq);
                } else {
                        mmci_start_command(host, data->stop, 0);
                }
        }
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
             unsigned int status)
{
        void __iomem *base = host->base;

        host->cmd = NULL;

        if (status & MCI_CMDTIMEOUT) {
                cmd->error = -ETIMEDOUT;
        } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
                cmd->error = -EILSEQ;
        } else {
                cmd->resp[0] = readl(base + MMCIRESPONSE0);
                cmd->resp[1] = readl(base + MMCIRESPONSE1);
                cmd->resp[2] = readl(base + MMCIRESPONSE2);
                cmd->resp[3] = readl(base + MMCIRESPONSE3);
        }

        if (!cmd->data || cmd->error) {
                if (host->data)
                        mmci_stop_data(host);
                mmci_request_end(host, cmd->mrq);
        } else if (!(cmd->data->flags & MMC_DATA_READ)) {
                mmci_start_data(host, cmd->data);
        }
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
        void __iomem *base = host->base;
        char *ptr = buffer;
        u32 status;
        int host_remain = host->size;

        do {
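                /*
                 * MMCIFIFOCNT holds the number of 32-bit words still to
                 * be transferred, so the bytes already sitting in the
                 * FIFO are host_remain minus that count times four. E.g.
                 * (assumed numbers) host_remain = 64 and a FIFOCNT of 12
                 * words leave 64 - 48 = 16 bytes readable.
                 */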
                int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

                if (count > remain)
                        count = remain;

                if (count <= 0)
                        break;

                readsl(base + MMCIFIFO, ptr, count >> 2);

                ptr += count;
                remain -= count;
                host_remain -= count;

                if (remain == 0)
                        break;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_RXDATAAVLBL);

        return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
        struct variant_data *variant = host->variant;
        void __iomem *base = host->base;
        char *ptr = buffer;

        do {
                unsigned int count, maxcnt;

                maxcnt = status & MCI_TXFIFOEMPTY ?
                         variant->fifosize : variant->fifohalfsize;
                count = min(remain, maxcnt);

                /*
                 * The ST Micro variant for SDIO transfer sizes
                 * less than 8 bytes should have clock H/W flow
                 * control disabled.
                 */
                if (variant->sdio &&
                    mmc_card_sdio(host->mmc->card)) {
                        if (count < 8)
                                writel(readl(host->base + MMCICLOCK) &
                                       ~variant->clkreg_enable,
                                       host->base + MMCICLOCK);
                        else
                                writel(readl(host->base + MMCICLOCK) |
                                       variant->clkreg_enable,
                                       host->base + MMCICLOCK);
                }

                /*
                 * SDIO especially may want to send something that is
                 * not divisible by 4 (as opposed to card sectors
                 * etc), and the FIFO only accepts full 32-bit writes.
                 * So compensate by adding +3 on the count, a single
                 * byte becomes a 32-bit write, 7 bytes will be two
                 * 32-bit writes etc.
                 */
                writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);

                ptr += count;
                remain -= count;

                if (remain == 0)
                        break;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_TXFIFOHALFEMPTY);

        return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        struct sg_mapping_iter *sg_miter = &host->sg_miter;
        struct variant_data *variant = host->variant;
        void __iomem *base = host->base;
        unsigned long flags;
        u32 status;

        status = readl(base + MMCISTATUS);

        dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

        local_irq_save(flags);

        do {
                unsigned int remain, len;
                char *buffer;

                /*
                 * For write, we only need to test the half-empty flag
                 * here - if the FIFO is completely empty, then by
                 * definition it is more than half empty.
                 *
                 * For read, check for data available.
                 */
                if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
                        break;

                if (!sg_miter_next(sg_miter))
                        break;

                buffer = sg_miter->addr;
                remain = sg_miter->length;

                len = 0;
                if (status & MCI_RXACTIVE)
                        len = mmci_pio_read(host, buffer, remain);
                if (status & MCI_TXACTIVE)
                        len = mmci_pio_write(host, buffer, remain, status);

                sg_miter->consumed = len;

                host->size -= len;
                remain -= len;

                if (remain)
                        break;

                status = readl(base + MMCISTATUS);
        } while (1);

        sg_miter_stop(sg_miter);

        local_irq_restore(flags);

        /*
         * If we have less than the fifo 'half-full' threshold to transfer,
         * trigger a PIO interrupt as soon as any data is available.
         */
        if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
                mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

        /*
         * If we run out of data, disable the data IRQs; this
         * prevents a race where the FIFO becomes empty before
         * the chip itself has disabled the data path, and
         * stops us racing with our data end IRQ.
         */
        if (host->size == 0) {
                mmci_set_mask1(host, 0);
                writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
        }

        return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        u32 status;
        int ret = 0;

        spin_lock(&host->lock);

        do {
                struct mmc_command *cmd;
                struct mmc_data *data;

                status = readl(host->base + MMCISTATUS);

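                /*
                 * On variants with only one IRQ line wired up, the PIO
                 * (MASK1) events are also routed to this handler, so
                 * dispatch them and strip them from the status word.
                 */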
                if (host->singleirq) {
                        if (status & readl(host->base + MMCIMASK1))
                                mmci_pio_irq(irq, dev_id);

                        status &= ~MCI_IRQ1MASK;
                }

                status &= readl(host->base + MMCIMASK0);
                writel(status, host->base + MMCICLEAR);

                dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

                data = host->data;
                if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
                              MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
                        mmci_data_irq(host, data, status);

                cmd = host->cmd;
                if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
                        mmci_cmd_irq(host, cmd, status);

                ret = 1;
        } while (status);

        spin_unlock(&host->lock);

        return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmci_host *host = mmc_priv(mmc);
        unsigned long flags;

        WARN_ON(host->mrq != NULL);

        if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
                dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
                        mrq->data->blksz);
                mrq->cmd->error = -EINVAL;
                mmc_request_done(mmc, mrq);
                return;
        }

        spin_lock_irqsave(&host->lock, flags);

        host->mrq = mrq;

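        /*
         * For reads the data path is armed before the command is sent,
         * so the DPSM is ready when the card starts returning data;
         * writes are started from the command-done path instead (see
         * mmci_cmd_irq()).
         */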
        if (mrq->data && mrq->data->flags & MMC_DATA_READ)
                mmci_start_data(host, mrq->data);

        mmci_start_command(host, mrq->cmd, 0);

        spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct mmci_host *host = mmc_priv(mmc);
        u32 pwr = 0;
        unsigned long flags;
        int ret;

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                if (host->vcc)
                        ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
                break;
        case MMC_POWER_UP:
                if (host->vcc) {
                        ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
                        if (ret) {
                                dev_err(mmc_dev(mmc), "unable to set OCR\n");
                                /*
                                 * The .set_ios() function in the mmc_host_ops
                                 * struct returns void, and failing to set the
                                 * power should be rare so we print an error
                                 * and return here.
                                 */
                                return;
                        }
                }
                if (host->plat->vdd_handler)
                        pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
                                                       ios->power_mode);
                /* The ST version does not have this, fall through to POWER_ON */
                if (host->hw_designer != AMBA_VENDOR_ST) {
                        pwr |= MCI_PWR_UP;
                        break;
                }
        case MMC_POWER_ON:
                pwr |= MCI_PWR_ON;
                break;
        }

        if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
                if (host->hw_designer != AMBA_VENDOR_ST)
                        pwr |= MCI_ROD;
                else {
                        /*
                         * The ST Micro variant uses the ROD bit for something
                         * else and only has OD (Open Drain).
                         */
                        pwr |= MCI_OD;
                }
        }

        spin_lock_irqsave(&host->lock, flags);

        mmci_set_clkreg(host, ios->clock);

        if (host->pwr != pwr) {
                host->pwr = pwr;
                writel(pwr, host->base + MMCIPOWER);
        }

        spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_ro(struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);

        if (host->gpio_wp == -ENOSYS)
                return -ENOSYS;

        return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct mmci_platform_data *plat = host->plat;
        unsigned int status;

        if (host->gpio_cd == -ENOSYS) {
                if (!plat->status)
                        return 1; /* Assume always present */

                status = plat->status(mmc_dev(host->mmc));
        } else
                status = !!gpio_get_value_cansleep(host->gpio_cd)
                        ^ plat->cd_invert;

        /*
         * Use positive logic throughout - status is zero for no card,
         * non-zero for card inserted.
         */
        return status;
}

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;

        mmc_detect_change(host->mmc, msecs_to_jiffies(500));

        return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
        .request        = mmci_request,
        .set_ios        = mmci_set_ios,
        .get_ro         = mmci_get_ro,
        .get_cd         = mmci_get_cd,
};

static int __devinit mmci_probe(struct amba_device *dev,
                                const struct amba_id *id)
{
        struct mmci_platform_data *plat = dev->dev.platform_data;
        struct variant_data *variant = id->data;
        struct mmci_host *host;
        struct mmc_host *mmc;
        int ret;

        /* must have platform data */
        if (!plat) {
                ret = -EINVAL;
                goto out;
        }

        ret = amba_request_regions(dev, DRIVER_NAME);
        if (ret)
                goto out;

        mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
        if (!mmc) {
                ret = -ENOMEM;
                goto rel_regions;
        }

        host = mmc_priv(mmc);
        host->mmc = mmc;

        host->gpio_wp = -ENOSYS;
        host->gpio_cd = -ENOSYS;
        host->gpio_cd_irq = -1;

        host->hw_designer = amba_manf(dev);
        host->hw_revision = amba_rev(dev);
        dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
        dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

        host->clk = clk_get(&dev->dev, NULL);
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
                host->clk = NULL;
                goto host_free;
        }

        ret = clk_enable(host->clk);
        if (ret)
                goto clk_free;

        host->plat = plat;
        host->variant = variant;
        host->mclk = clk_get_rate(host->clk);
        /*
         * According to the spec, mclk is max 100 MHz,
         * so we try to adjust the clock down to this,
         * (if possible).
         */
        if (host->mclk > 100000000) {
                ret = clk_set_rate(host->clk, 100000000);
                if (ret < 0)
                        goto clk_disable;
                host->mclk = clk_get_rate(host->clk);
                dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
                        host->mclk);
        }
        host->phybase = dev->res.start;
        host->base = ioremap(dev->res.start, resource_size(&dev->res));
        if (!host->base) {
                ret = -ENOMEM;
                goto clk_disable;
        }

        mmc->ops = &mmci_ops;
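        /*
         * The largest programmable divider (clkdiv = 255) yields
         * mclk / (2 * 256) = mclk / 512 on the ARM variant, so round up
         * to get the lowest frequency the block can actually produce.
         */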
        mmc->f_min = (host->mclk + 511) / 512;
        /*
         * If the platform data supplies a maximum operating
         * frequency, this takes precedence. Else, we fall back
         * to using the module parameter, which has a (low)
         * default value in case it is not specified. Either
         * value must not exceed the clock rate into the block,
         * of course.
         */
        if (plat->f_max)
                mmc->f_max = min(host->mclk, plat->f_max);
        else
                mmc->f_max = min(host->mclk, fmax);
        dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
        /* If we're using the regulator framework, try to fetch a regulator */
        host->vcc = regulator_get(&dev->dev, "vmmc");
        if (IS_ERR(host->vcc))
                host->vcc = NULL;
        else {
                int mask = mmc_regulator_get_ocrmask(host->vcc);

                if (mask < 0)
                        dev_err(&dev->dev, "error getting OCR mask (%d)\n",
                                mask);
                else {
                        host->mmc->ocr_avail = (u32) mask;
                        if (plat->ocr_mask)
                                dev_warn(&dev->dev,
                                         "Provided ocr_mask/setpower will not be used "
                                         "(using regulator instead)\n");
                }
        }
#endif
        /* Fall back to platform data if no regulator is found */
        if (host->vcc == NULL)
                mmc->ocr_avail = plat->ocr_mask;
        mmc->caps = plat->capabilities;

        /*
         * We can do scatter/gather I/O (SGIO).
         */
        mmc->max_segs = NR_SG;

        /*
         * Since only a certain number of bits are valid in the data length
         * register, we must ensure that we don't exceed 2^num-1 bytes in a
         * single request.
         */
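        /*
         * E.g. 16 datalength bits cap a request at 65535 bytes, while
         * the 24-bit ST variants allow up to 16 MiB - 1.
         */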
        mmc->max_req_size = (1 << variant->datalength_bits) - 1;

        /*
         * Set the maximum segment size. Since we aren't doing DMA
         * (yet) we are only limited by the data length register.
         */
        mmc->max_seg_size = mmc->max_req_size;

        /*
         * Block size can be up to 2048 bytes, but must be a power of two.
         */
        mmc->max_blk_size = 2048;

        /*
         * No limit on the number of blocks transferred.
         */
        mmc->max_blk_count = mmc->max_req_size;

        spin_lock_init(&host->lock);

        writel(0, host->base + MMCIMASK0);
        writel(0, host->base + MMCIMASK1);
        writel(0xfff, host->base + MMCICLEAR);

        if (gpio_is_valid(plat->gpio_cd)) {
                ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
                if (ret == 0)
                        ret = gpio_direction_input(plat->gpio_cd);
                if (ret == 0)
                        host->gpio_cd = plat->gpio_cd;
                else if (ret != -ENOSYS)
                        goto err_gpio_cd;

                ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
                                              mmci_cd_irq, 0,
                                              DRIVER_NAME " (cd)", host);
                if (ret >= 0)
                        host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
        }
        if (gpio_is_valid(plat->gpio_wp)) {
                ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
                if (ret == 0)
                        ret = gpio_direction_input(plat->gpio_wp);
                if (ret == 0)
                        host->gpio_wp = plat->gpio_wp;
                else if (ret != -ENOSYS)
                        goto err_gpio_wp;
        }

        if ((host->plat->status || host->gpio_cd != -ENOSYS)
            && host->gpio_cd_irq < 0)
                mmc->caps |= MMC_CAP_NEEDS_POLL;

        ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
        if (ret)
                goto unmap;

        if (dev->irq[1] == NO_IRQ)
                host->singleirq = true;
        else {
                ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
                                  DRIVER_NAME " (pio)", host);
                if (ret)
                        goto irq0_free;
        }

        writel(MCI_IRQENABLE, host->base + MMCIMASK0);

        amba_set_drvdata(dev, mmc);

        dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
                 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
                 amba_rev(dev), (unsigned long long)dev->res.start,
                 dev->irq[0], dev->irq[1]);

        mmci_dma_setup(host);

        mmc_add_host(mmc);

        return 0;

irq0_free:
        free_irq(dev->irq[0], host);
unmap:
        if (host->gpio_wp != -ENOSYS)
                gpio_free(host->gpio_wp);
err_gpio_wp:
        if (host->gpio_cd_irq >= 0)
                free_irq(host->gpio_cd_irq, host);
        if (host->gpio_cd != -ENOSYS)
                gpio_free(host->gpio_cd);
err_gpio_cd:
        iounmap(host->base);
clk_disable:
        clk_disable(host->clk);
clk_free:
        clk_put(host->clk);
host_free:
        mmc_free_host(mmc);
rel_regions:
        amba_release_regions(dev);
out:
        return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);

        amba_set_drvdata(dev, NULL);

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                mmc_remove_host(mmc);

                writel(0, host->base + MMCIMASK0);
                writel(0, host->base + MMCIMASK1);

                writel(0, host->base + MMCICOMMAND);
                writel(0, host->base + MMCIDATACTRL);

                mmci_dma_release(host);
                free_irq(dev->irq[0], host);
                if (!host->singleirq)
                        free_irq(dev->irq[1], host);

                if (host->gpio_wp != -ENOSYS)
                        gpio_free(host->gpio_wp);
                if (host->gpio_cd_irq >= 0)
                        free_irq(host->gpio_cd_irq, host);
                if (host->gpio_cd != -ENOSYS)
                        gpio_free(host->gpio_cd);

                iounmap(host->base);
                clk_disable(host->clk);
                clk_put(host->clk);

                if (host->vcc)
                        mmc_regulator_set_ocr(mmc, host->vcc, 0);
                regulator_put(host->vcc);

                mmc_free_host(mmc);

                amba_release_regions(dev);
        }

        return 0;
}

#ifdef CONFIG_PM
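/*
 * Legacy amba_driver suspend/resume hooks: the interrupt mask is
 * cleared on the way down and MCI_IRQENABLE restored on the way up.
 */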
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);
        int ret = 0;

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                ret = mmc_suspend_host(mmc);
                if (ret == 0)
                        writel(0, host->base + MMCIMASK0);
        }

        return ret;
}

static int mmci_resume(struct amba_device *dev)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);
        int ret = 0;

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                writel(MCI_IRQENABLE, host->base + MMCIMASK0);

                ret = mmc_resume_host(mmc);
        }

        return ret;
}
#else
#define mmci_suspend    NULL
#define mmci_resume     NULL
#endif

static struct amba_id mmci_ids[] = {
        {
                .id     = 0x00041180,
                .mask   = 0xff0fffff,
                .data   = &variant_arm,
        },
        {
                .id     = 0x01041180,
                .mask   = 0xff0fffff,
                .data   = &variant_arm_extended_fifo,
        },
        {
                .id     = 0x00041181,
                .mask   = 0x000fffff,
                .data   = &variant_arm,
        },
        /* ST Micro variants */
        {
                .id     = 0x00180180,
                .mask   = 0x00ffffff,
                .data   = &variant_u300,
        },
        {
                .id     = 0x00280180,
                .mask   = 0x00ffffff,
                .data   = &variant_u300,
        },
        {
                .id     = 0x00480180,
                .mask   = 0xf0ffffff,
                .data   = &variant_ux500,
        },
        {
                .id     = 0x10480180,
                .mask   = 0xf0ffffff,
                .data   = &variant_ux500v2,
        },
        { 0, 0 },
};

static struct amba_driver mmci_driver = {
        .drv            = {
                .name   = DRIVER_NAME,
        },
        .probe          = mmci_probe,
        .remove         = __devexit_p(mmci_remove),
        .suspend        = mmci_suspend,
        .resume         = mmci_resume,
        .id_table       = mmci_ids,
};

static int __init mmci_init(void)
{
        return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
        amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");