1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/drivers/mmc/host/pxa.c - PXA MMCI driver 4 * 5 * Copyright (C) 2003 Russell King, All Rights Reserved. 6 * 7 * This hardware is really sick: 8 * - No way to clear interrupts. 9 * - Have to turn off the clock whenever we touch the device. 10 * - Doesn't tell you how many data blocks were transferred. 11 * Yuck! 12 * 13 * 1 and 3 byte data transfers not supported 14 * max block length up to 1023 15 */ 16 #include <linux/module.h> 17 #include <linux/init.h> 18 #include <linux/ioport.h> 19 #include <linux/platform_device.h> 20 #include <linux/delay.h> 21 #include <linux/interrupt.h> 22 #include <linux/dmaengine.h> 23 #include <linux/dma-mapping.h> 24 #include <linux/clk.h> 25 #include <linux/err.h> 26 #include <linux/mmc/host.h> 27 #include <linux/mmc/slot-gpio.h> 28 #include <linux/io.h> 29 #include <linux/regulator/consumer.h> 30 #include <linux/gpio/consumer.h> 31 #include <linux/gfp.h> 32 #include <linux/of.h> 33 #include <linux/of_device.h> 34 #include <linux/soc/pxa/cpu.h> 35 36 #include <linux/sizes.h> 37 38 #include <linux/platform_data/mmc-pxamci.h> 39 40 #include "pxamci.h" 41 42 #define DRIVER_NAME "pxa2xx-mci" 43 44 #define NR_SG 1 45 #define CLKRT_OFF (~0) 46 47 #define mmc_has_26MHz() (cpu_is_pxa300() || cpu_is_pxa310() \ 48 || cpu_is_pxa935()) 49 50 struct pxamci_host { 51 struct mmc_host *mmc; 52 spinlock_t lock; 53 struct resource *res; 54 void __iomem *base; 55 struct clk *clk; 56 unsigned long clkrate; 57 unsigned int clkrt; 58 unsigned int cmdat; 59 unsigned int imask; 60 unsigned int power_mode; 61 unsigned long detect_delay_ms; 62 bool use_ro_gpio; 63 struct gpio_desc *power; 64 struct pxamci_platform_data *pdata; 65 66 struct mmc_request *mrq; 67 struct mmc_command *cmd; 68 struct mmc_data *data; 69 70 struct dma_chan *dma_chan_rx; 71 struct dma_chan *dma_chan_tx; 72 dma_cookie_t dma_cookie; 73 unsigned int dma_len; 74 unsigned int dma_dir; 75 }; 76 77 static int pxamci_init_ocr(struct pxamci_host *host) 78 { 79 struct mmc_host *mmc = host->mmc; 80 int ret; 81 82 ret = mmc_regulator_get_supply(mmc); 83 if (ret < 0) 84 return ret; 85 86 if (IS_ERR(mmc->supply.vmmc)) { 87 /* fall-back to platform data */ 88 mmc->ocr_avail = host->pdata ? 89 host->pdata->ocr_mask : 90 MMC_VDD_32_33 | MMC_VDD_33_34; 91 } 92 93 return 0; 94 } 95 96 static inline int pxamci_set_power(struct pxamci_host *host, 97 unsigned char power_mode, 98 unsigned int vdd) 99 { 100 struct mmc_host *mmc = host->mmc; 101 struct regulator *supply = mmc->supply.vmmc; 102 103 if (!IS_ERR(supply)) 104 return mmc_regulator_set_ocr(mmc, supply, vdd); 105 106 if (host->power) { 107 bool on = !!((1 << vdd) & host->pdata->ocr_mask); 108 gpiod_set_value(host->power, on); 109 } 110 111 if (host->pdata && host->pdata->setpower) 112 return host->pdata->setpower(mmc_dev(host->mmc), vdd); 113 114 return 0; 115 } 116 117 static void pxamci_stop_clock(struct pxamci_host *host) 118 { 119 if (readl(host->base + MMC_STAT) & STAT_CLK_EN) { 120 unsigned long timeout = 10000; 121 unsigned int v; 122 123 writel(STOP_CLOCK, host->base + MMC_STRPCL); 124 125 do { 126 v = readl(host->base + MMC_STAT); 127 if (!(v & STAT_CLK_EN)) 128 break; 129 udelay(1); 130 } while (timeout--); 131 132 if (v & STAT_CLK_EN) 133 dev_err(mmc_dev(host->mmc), "unable to stop clock\n"); 134 } 135 } 136 137 static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask) 138 { 139 unsigned long flags; 140 141 spin_lock_irqsave(&host->lock, flags); 142 host->imask &= ~mask; 143 writel(host->imask, host->base + MMC_I_MASK); 144 spin_unlock_irqrestore(&host->lock, flags); 145 } 146 147 static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask) 148 { 149 unsigned long flags; 150 151 spin_lock_irqsave(&host->lock, flags); 152 host->imask |= mask; 153 writel(host->imask, host->base + MMC_I_MASK); 154 spin_unlock_irqrestore(&host->lock, flags); 155 } 156 157 static void pxamci_dma_irq(void *param); 158 159 static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) 160 { 161 struct dma_async_tx_descriptor *tx; 162 enum dma_transfer_direction direction; 163 struct dma_slave_config config; 164 struct dma_chan *chan; 165 unsigned int nob = data->blocks; 166 unsigned long long clks; 167 unsigned int timeout; 168 int ret; 169 170 host->data = data; 171 172 writel(nob, host->base + MMC_NOB); 173 writel(data->blksz, host->base + MMC_BLKLEN); 174 175 clks = (unsigned long long)data->timeout_ns * host->clkrate; 176 do_div(clks, 1000000000UL); 177 timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt); 178 writel((timeout + 255) / 256, host->base + MMC_RDTO); 179 180 memset(&config, 0, sizeof(config)); 181 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 182 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 183 config.src_addr = host->res->start + MMC_RXFIFO; 184 config.dst_addr = host->res->start + MMC_TXFIFO; 185 config.src_maxburst = 32; 186 config.dst_maxburst = 32; 187 188 if (data->flags & MMC_DATA_READ) { 189 host->dma_dir = DMA_FROM_DEVICE; 190 direction = DMA_DEV_TO_MEM; 191 chan = host->dma_chan_rx; 192 } else { 193 host->dma_dir = DMA_TO_DEVICE; 194 direction = DMA_MEM_TO_DEV; 195 chan = host->dma_chan_tx; 196 } 197 198 config.direction = direction; 199 200 ret = dmaengine_slave_config(chan, &config); 201 if (ret < 0) { 202 dev_err(mmc_dev(host->mmc), "dma slave config failed\n"); 203 return; 204 } 205 206 host->dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len, 207 host->dma_dir); 208 209 tx = dmaengine_prep_slave_sg(chan, data->sg, host->dma_len, direction, 210 DMA_PREP_INTERRUPT); 211 if (!tx) { 212 dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n"); 213 return; 214 } 215 216 if (!(data->flags & MMC_DATA_READ)) { 217 tx->callback = pxamci_dma_irq; 218 tx->callback_param = host; 219 } 220 221 host->dma_cookie = dmaengine_submit(tx); 222 223 /* 224 * workaround for erratum #91: 225 * only start DMA now if we are doing a read, 226 * otherwise we wait until CMD/RESP has finished 227 * before starting DMA. 228 */ 229 if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ) 230 dma_async_issue_pending(chan); 231 } 232 233 static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat) 234 { 235 WARN_ON(host->cmd != NULL); 236 host->cmd = cmd; 237 238 if (cmd->flags & MMC_RSP_BUSY) 239 cmdat |= CMDAT_BUSY; 240 241 #define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE)) 242 switch (RSP_TYPE(mmc_resp_type(cmd))) { 243 case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */ 244 cmdat |= CMDAT_RESP_SHORT; 245 break; 246 case RSP_TYPE(MMC_RSP_R3): 247 cmdat |= CMDAT_RESP_R3; 248 break; 249 case RSP_TYPE(MMC_RSP_R2): 250 cmdat |= CMDAT_RESP_R2; 251 break; 252 default: 253 break; 254 } 255 256 writel(cmd->opcode, host->base + MMC_CMD); 257 writel(cmd->arg >> 16, host->base + MMC_ARGH); 258 writel(cmd->arg & 0xffff, host->base + MMC_ARGL); 259 writel(cmdat, host->base + MMC_CMDAT); 260 writel(host->clkrt, host->base + MMC_CLKRT); 261 262 writel(START_CLOCK, host->base + MMC_STRPCL); 263 264 pxamci_enable_irq(host, END_CMD_RES); 265 } 266 267 static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq) 268 { 269 host->mrq = NULL; 270 host->cmd = NULL; 271 host->data = NULL; 272 mmc_request_done(host->mmc, mrq); 273 } 274 275 static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat) 276 { 277 struct mmc_command *cmd = host->cmd; 278 int i; 279 u32 v; 280 281 if (!cmd) 282 return 0; 283 284 host->cmd = NULL; 285 286 /* 287 * Did I mention this is Sick. We always need to 288 * discard the upper 8 bits of the first 16-bit word. 289 */ 290 v = readl(host->base + MMC_RES) & 0xffff; 291 for (i = 0; i < 4; i++) { 292 u32 w1 = readl(host->base + MMC_RES) & 0xffff; 293 u32 w2 = readl(host->base + MMC_RES) & 0xffff; 294 cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8; 295 v = w2; 296 } 297 298 if (stat & STAT_TIME_OUT_RESPONSE) { 299 cmd->error = -ETIMEDOUT; 300 } else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) { 301 /* 302 * workaround for erratum #42: 303 * Intel PXA27x Family Processor Specification Update Rev 001 304 * A bogus CRC error can appear if the msb of a 136 bit 305 * response is a one. 306 */ 307 if (cpu_is_pxa27x() && 308 (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000)) 309 pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode); 310 else 311 cmd->error = -EILSEQ; 312 } 313 314 pxamci_disable_irq(host, END_CMD_RES); 315 if (host->data && !cmd->error) { 316 pxamci_enable_irq(host, DATA_TRAN_DONE); 317 /* 318 * workaround for erratum #91, if doing write 319 * enable DMA late 320 */ 321 if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE) 322 dma_async_issue_pending(host->dma_chan_tx); 323 } else { 324 pxamci_finish_request(host, host->mrq); 325 } 326 327 return 1; 328 } 329 330 static int pxamci_data_done(struct pxamci_host *host, unsigned int stat) 331 { 332 struct mmc_data *data = host->data; 333 struct dma_chan *chan; 334 335 if (!data) 336 return 0; 337 338 if (data->flags & MMC_DATA_READ) 339 chan = host->dma_chan_rx; 340 else 341 chan = host->dma_chan_tx; 342 dma_unmap_sg(chan->device->dev, 343 data->sg, data->sg_len, host->dma_dir); 344 345 if (stat & STAT_READ_TIME_OUT) 346 data->error = -ETIMEDOUT; 347 else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR)) 348 data->error = -EILSEQ; 349 350 /* 351 * There appears to be a hardware design bug here. There seems to 352 * be no way to find out how much data was transferred to the card. 353 * This means that if there was an error on any block, we mark all 354 * data blocks as being in error. 355 */ 356 if (!data->error) 357 data->bytes_xfered = data->blocks * data->blksz; 358 else 359 data->bytes_xfered = 0; 360 361 pxamci_disable_irq(host, DATA_TRAN_DONE); 362 363 host->data = NULL; 364 if (host->mrq->stop) { 365 pxamci_stop_clock(host); 366 pxamci_start_cmd(host, host->mrq->stop, host->cmdat); 367 } else { 368 pxamci_finish_request(host, host->mrq); 369 } 370 371 return 1; 372 } 373 374 static irqreturn_t pxamci_irq(int irq, void *devid) 375 { 376 struct pxamci_host *host = devid; 377 unsigned int ireg; 378 int handled = 0; 379 380 ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK); 381 382 if (ireg) { 383 unsigned stat = readl(host->base + MMC_STAT); 384 385 pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat); 386 387 if (ireg & END_CMD_RES) 388 handled |= pxamci_cmd_done(host, stat); 389 if (ireg & DATA_TRAN_DONE) 390 handled |= pxamci_data_done(host, stat); 391 if (ireg & SDIO_INT) { 392 mmc_signal_sdio_irq(host->mmc); 393 handled = 1; 394 } 395 } 396 397 return IRQ_RETVAL(handled); 398 } 399 400 static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq) 401 { 402 struct pxamci_host *host = mmc_priv(mmc); 403 unsigned int cmdat; 404 405 WARN_ON(host->mrq != NULL); 406 407 host->mrq = mrq; 408 409 pxamci_stop_clock(host); 410 411 cmdat = host->cmdat; 412 host->cmdat &= ~CMDAT_INIT; 413 414 if (mrq->data) { 415 pxamci_setup_data(host, mrq->data); 416 417 cmdat &= ~CMDAT_BUSY; 418 cmdat |= CMDAT_DATAEN | CMDAT_DMAEN; 419 if (mrq->data->flags & MMC_DATA_WRITE) 420 cmdat |= CMDAT_WRITE; 421 } 422 423 pxamci_start_cmd(host, mrq->cmd, cmdat); 424 } 425 426 static int pxamci_get_ro(struct mmc_host *mmc) 427 { 428 struct pxamci_host *host = mmc_priv(mmc); 429 430 if (host->use_ro_gpio) 431 return mmc_gpio_get_ro(mmc); 432 if (host->pdata && host->pdata->get_ro) 433 return !!host->pdata->get_ro(mmc_dev(mmc)); 434 /* 435 * Board doesn't support read only detection; let the mmc core 436 * decide what to do. 437 */ 438 return -ENOSYS; 439 } 440 441 static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 442 { 443 struct pxamci_host *host = mmc_priv(mmc); 444 445 if (ios->clock) { 446 unsigned long rate = host->clkrate; 447 unsigned int clk = rate / ios->clock; 448 449 if (host->clkrt == CLKRT_OFF) 450 clk_prepare_enable(host->clk); 451 452 if (ios->clock == 26000000) { 453 /* to support 26MHz */ 454 host->clkrt = 7; 455 } else { 456 /* to handle (19.5MHz, 26MHz) */ 457 if (!clk) 458 clk = 1; 459 460 /* 461 * clk might result in a lower divisor than we 462 * desire. check for that condition and adjust 463 * as appropriate. 464 */ 465 if (rate / clk > ios->clock) 466 clk <<= 1; 467 host->clkrt = fls(clk) - 1; 468 } 469 470 /* 471 * we write clkrt on the next command 472 */ 473 } else { 474 pxamci_stop_clock(host); 475 if (host->clkrt != CLKRT_OFF) { 476 host->clkrt = CLKRT_OFF; 477 clk_disable_unprepare(host->clk); 478 } 479 } 480 481 if (host->power_mode != ios->power_mode) { 482 int ret; 483 484 host->power_mode = ios->power_mode; 485 486 ret = pxamci_set_power(host, ios->power_mode, ios->vdd); 487 if (ret) { 488 dev_err(mmc_dev(mmc), "unable to set power\n"); 489 /* 490 * The .set_ios() function in the mmc_host_ops 491 * struct return void, and failing to set the 492 * power should be rare so we print an error and 493 * return here. 494 */ 495 return; 496 } 497 498 if (ios->power_mode == MMC_POWER_ON) 499 host->cmdat |= CMDAT_INIT; 500 } 501 502 if (ios->bus_width == MMC_BUS_WIDTH_4) 503 host->cmdat |= CMDAT_SD_4DAT; 504 else 505 host->cmdat &= ~CMDAT_SD_4DAT; 506 507 dev_dbg(mmc_dev(mmc), "PXAMCI: clkrt = %x cmdat = %x\n", 508 host->clkrt, host->cmdat); 509 } 510 511 static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable) 512 { 513 struct pxamci_host *pxa_host = mmc_priv(host); 514 515 if (enable) 516 pxamci_enable_irq(pxa_host, SDIO_INT); 517 else 518 pxamci_disable_irq(pxa_host, SDIO_INT); 519 } 520 521 static const struct mmc_host_ops pxamci_ops = { 522 .request = pxamci_request, 523 .get_cd = mmc_gpio_get_cd, 524 .get_ro = pxamci_get_ro, 525 .set_ios = pxamci_set_ios, 526 .enable_sdio_irq = pxamci_enable_sdio_irq, 527 }; 528 529 static void pxamci_dma_irq(void *param) 530 { 531 struct pxamci_host *host = param; 532 struct dma_tx_state state; 533 enum dma_status status; 534 struct dma_chan *chan; 535 unsigned long flags; 536 537 spin_lock_irqsave(&host->lock, flags); 538 539 if (!host->data) 540 goto out_unlock; 541 542 if (host->data->flags & MMC_DATA_READ) 543 chan = host->dma_chan_rx; 544 else 545 chan = host->dma_chan_tx; 546 547 status = dmaengine_tx_status(chan, host->dma_cookie, &state); 548 549 if (likely(status == DMA_COMPLETE)) { 550 writel(BUF_PART_FULL, host->base + MMC_PRTBUF); 551 } else { 552 pr_err("%s: DMA error on %s channel\n", mmc_hostname(host->mmc), 553 host->data->flags & MMC_DATA_READ ? "rx" : "tx"); 554 host->data->error = -EIO; 555 pxamci_data_done(host, 0); 556 } 557 558 out_unlock: 559 spin_unlock_irqrestore(&host->lock, flags); 560 } 561 562 static irqreturn_t pxamci_detect_irq(int irq, void *devid) 563 { 564 struct pxamci_host *host = mmc_priv(devid); 565 566 mmc_detect_change(devid, msecs_to_jiffies(host->detect_delay_ms)); 567 return IRQ_HANDLED; 568 } 569 570 #ifdef CONFIG_OF 571 static const struct of_device_id pxa_mmc_dt_ids[] = { 572 { .compatible = "marvell,pxa-mmc" }, 573 { } 574 }; 575 576 MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids); 577 578 static int pxamci_of_init(struct platform_device *pdev, 579 struct mmc_host *mmc) 580 { 581 struct device_node *np = pdev->dev.of_node; 582 struct pxamci_host *host = mmc_priv(mmc); 583 u32 tmp; 584 int ret; 585 586 if (!np) 587 return 0; 588 589 /* pxa-mmc specific */ 590 if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0) 591 host->detect_delay_ms = tmp; 592 593 ret = mmc_of_parse(mmc); 594 if (ret < 0) 595 return ret; 596 597 return 0; 598 } 599 #else 600 static int pxamci_of_init(struct platform_device *pdev, 601 struct mmc_host *mmc) 602 { 603 return 0; 604 } 605 #endif 606 607 static int pxamci_probe(struct platform_device *pdev) 608 { 609 struct mmc_host *mmc; 610 struct pxamci_host *host = NULL; 611 struct device *dev = &pdev->dev; 612 struct resource *r; 613 int ret, irq; 614 615 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 616 irq = platform_get_irq(pdev, 0); 617 if (irq < 0) 618 return irq; 619 620 mmc = mmc_alloc_host(sizeof(struct pxamci_host), dev); 621 if (!mmc) { 622 ret = -ENOMEM; 623 goto out; 624 } 625 626 mmc->ops = &pxamci_ops; 627 628 /* 629 * We can do SG-DMA, but we don't because we never know how much 630 * data we successfully wrote to the card. 631 */ 632 mmc->max_segs = NR_SG; 633 634 /* 635 * Our hardware DMA can handle a maximum of one page per SG entry. 636 */ 637 mmc->max_seg_size = PAGE_SIZE; 638 639 /* 640 * Block length register is only 10 bits before PXA27x. 641 */ 642 mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048; 643 644 /* 645 * Block count register is 16 bits. 646 */ 647 mmc->max_blk_count = 65535; 648 649 ret = pxamci_of_init(pdev, mmc); 650 if (ret) 651 goto out; 652 653 host = mmc_priv(mmc); 654 host->mmc = mmc; 655 host->pdata = pdev->dev.platform_data; 656 host->clkrt = CLKRT_OFF; 657 658 host->clk = devm_clk_get(dev, NULL); 659 if (IS_ERR(host->clk)) { 660 ret = PTR_ERR(host->clk); 661 host->clk = NULL; 662 goto out; 663 } 664 665 host->clkrate = clk_get_rate(host->clk); 666 667 /* 668 * Calculate minimum clock rate, rounding up. 669 */ 670 mmc->f_min = (host->clkrate + 63) / 64; 671 mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate; 672 673 ret = pxamci_init_ocr(host); 674 if (ret < 0) 675 goto out; 676 677 mmc->caps = 0; 678 host->cmdat = 0; 679 if (!cpu_is_pxa25x()) { 680 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; 681 host->cmdat |= CMDAT_SDIO_INT_EN; 682 if (mmc_has_26MHz()) 683 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | 684 MMC_CAP_SD_HIGHSPEED; 685 } 686 687 spin_lock_init(&host->lock); 688 host->res = r; 689 host->imask = MMC_I_MASK_ALL; 690 691 host->base = devm_ioremap_resource(dev, r); 692 if (IS_ERR(host->base)) { 693 ret = PTR_ERR(host->base); 694 goto out; 695 } 696 697 /* 698 * Ensure that the host controller is shut down, and setup 699 * with our defaults. 700 */ 701 pxamci_stop_clock(host); 702 writel(0, host->base + MMC_SPI); 703 writel(64, host->base + MMC_RESTO); 704 writel(host->imask, host->base + MMC_I_MASK); 705 706 ret = devm_request_irq(dev, irq, pxamci_irq, 0, 707 DRIVER_NAME, host); 708 if (ret) 709 goto out; 710 711 platform_set_drvdata(pdev, mmc); 712 713 host->dma_chan_rx = dma_request_chan(dev, "rx"); 714 if (IS_ERR(host->dma_chan_rx)) { 715 dev_err(dev, "unable to request rx dma channel\n"); 716 ret = PTR_ERR(host->dma_chan_rx); 717 host->dma_chan_rx = NULL; 718 goto out; 719 } 720 721 host->dma_chan_tx = dma_request_chan(dev, "tx"); 722 if (IS_ERR(host->dma_chan_tx)) { 723 dev_err(dev, "unable to request tx dma channel\n"); 724 ret = PTR_ERR(host->dma_chan_tx); 725 host->dma_chan_tx = NULL; 726 goto out; 727 } 728 729 if (host->pdata) { 730 host->detect_delay_ms = host->pdata->detect_delay_ms; 731 732 host->power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW); 733 if (IS_ERR(host->power)) { 734 ret = PTR_ERR(host->power); 735 dev_err(dev, "Failed requesting gpio_power\n"); 736 goto out; 737 } 738 739 /* FIXME: should we pass detection delay to debounce? */ 740 ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); 741 if (ret && ret != -ENOENT) { 742 dev_err(dev, "Failed requesting gpio_cd\n"); 743 goto out; 744 } 745 746 if (!host->pdata->gpio_card_ro_invert) 747 mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; 748 749 ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); 750 if (ret && ret != -ENOENT) { 751 dev_err(dev, "Failed requesting gpio_ro\n"); 752 goto out; 753 } 754 if (!ret) 755 host->use_ro_gpio = true; 756 757 if (host->pdata->init) 758 host->pdata->init(dev, pxamci_detect_irq, mmc); 759 760 if (host->power && host->pdata->setpower) 761 dev_warn(dev, "gpio_power and setpower() both defined\n"); 762 if (host->use_ro_gpio && host->pdata->get_ro) 763 dev_warn(dev, "gpio_ro and get_ro() both defined\n"); 764 } 765 766 ret = mmc_add_host(mmc); 767 if (ret) { 768 if (host->pdata && host->pdata->exit) 769 host->pdata->exit(dev, mmc); 770 goto out; 771 } 772 773 return 0; 774 775 out: 776 if (host) { 777 if (host->dma_chan_rx) 778 dma_release_channel(host->dma_chan_rx); 779 if (host->dma_chan_tx) 780 dma_release_channel(host->dma_chan_tx); 781 } 782 if (mmc) 783 mmc_free_host(mmc); 784 return ret; 785 } 786 787 static int pxamci_remove(struct platform_device *pdev) 788 { 789 struct mmc_host *mmc = platform_get_drvdata(pdev); 790 791 if (mmc) { 792 struct pxamci_host *host = mmc_priv(mmc); 793 794 mmc_remove_host(mmc); 795 796 if (host->pdata && host->pdata->exit) 797 host->pdata->exit(&pdev->dev, mmc); 798 799 pxamci_stop_clock(host); 800 writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD| 801 END_CMD_RES|PRG_DONE|DATA_TRAN_DONE, 802 host->base + MMC_I_MASK); 803 804 dmaengine_terminate_all(host->dma_chan_rx); 805 dmaengine_terminate_all(host->dma_chan_tx); 806 dma_release_channel(host->dma_chan_rx); 807 dma_release_channel(host->dma_chan_tx); 808 809 mmc_free_host(mmc); 810 } 811 812 return 0; 813 } 814 815 static struct platform_driver pxamci_driver = { 816 .probe = pxamci_probe, 817 .remove = pxamci_remove, 818 .driver = { 819 .name = DRIVER_NAME, 820 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 821 .of_match_table = of_match_ptr(pxa_mmc_dt_ids), 822 }, 823 }; 824 825 module_platform_driver(pxamci_driver); 826 827 MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver"); 828 MODULE_LICENSE("GPL"); 829 MODULE_ALIAS("platform:pxa2xx-mci"); 830