/*
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * Copyright (C) 2015-17 Renesas Electronics Corporation
 * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
 * Copyright (C) 2017 Horms Solutions, Simon Horman
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver
 * (4-bit support), with further 4-bit support from a later datasheet.
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"

static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
				      struct mmc_data *data)
{
	if (host->dma_ops)
		host->dma_ops->start(host, data);
}

static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (host->dma_ops)
		host->dma_ops->enable(host, enable);
}

static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
					struct tmio_mmc_data *pdata)
{
	if (host->dma_ops) {
		host->dma_ops->request(host, pdata);
	} else {
		host->chan_tx = NULL;
		host->chan_rx = NULL;
	}
}

static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->release(host);
}

static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->abort(host);
}

static inline void tmio_mmc_dataend_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->dataend(host);
}

void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_enable_mmc_irqs);

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_disable_mmc_irqs);

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
}
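/*
 * Note on the IRQ helpers above (a reading of the register usage here, not
 * of a datasheet): CTL_IRQ_MASK holds one bit per event, with a set bit
 * meaning "masked", so enabling an interrupt clears bits and disabling sets
 * them. CTL_STATUS appears to be write-zero-to-clear: writing ~i acks
 * exactly the events in i while leaving all other status bits untouched.
 */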
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}

#define CMDREQ_TIMEOUT	5000

static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		u16 sdio_status;

		/* Keep the device active while the SDIO IRQ is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));

		host->sdio_irq_enabled = true;
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ;

		/* Clear obsolete interrupts before enabling */
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
		if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
			sdio_status |= TMIO_SDIO_SETBITS_MASK;
		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));

	/* HW engineers overrode docs: no sleep needed on R-Car2+ */
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		msleep(10);

	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));

	/* HW engineers overrode docs: no sleep needed on R-Car2+ */
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		msleep(10);
}

static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
			       unsigned int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock == 0) {
		tmio_mmc_clk_stop(host);
		return;
	}

	if (host->clk_update)
		clock = host->clk_update(host, new_clock) / 512;
	else
		clock = host->mmc->f_min;

	for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
		clock <<= 1;

	/* 1/1 clock divider is an option */
	if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1))
		clk |= 0xff;

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
			sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		msleep(10);

	tmio_mmc_clk_start(host);
}
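/*
 * Worked example for the divider loop above (the clock rates are assumed
 * values for illustration, not from a datasheet): clk starts at 0x80000080,
 * i.e. the divide-by-512 tap, and clock starts at the base clock / 512.
 * Each iteration doubles clock and shifts clk right, halving the divisor.
 * With a 48 MHz base clock and new_clock = 12 MHz, the loop runs seven
 * times, leaving clk = 0x01000001: the low byte (0x01) selects the
 * divide-by-4 tap, giving 48 MHz / 4 = 12 MHz. After nine shifts bit 22
 * becomes set, which is why (clk >> 22) & 1 identifies the 1/1 setting.
 */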
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);

	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
	}
}

static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), a .set_ios() call can preempt us, so we
	 * have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq) ||
	    time_is_after_jiffies(host->last_req_ts +
				  msecs_to_jiffies(CMDREQ_TIMEOUT))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		 mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);
}

/*
 * These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme.
 */
#define APP_CMD		0x0040
#define RESP_NONE	0x0300
#define RESP_R1		0x0400
#define RESP_R1B	0x0500
#define RESP_R2		0x0600
#define RESP_R3		0x0700
#define DATA_PRESENT	0x0800
#define TRANSFER_READ	0x1000
#define TRANSFER_MULTI	0x2000
#define SECURITY_CMD	0x4000
#define NO_CMD12_ISSUE	0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
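/*
 * Example of how a command register value is composed from the masks above
 * (derived from tmio_mmc_start_command() below, restated here for clarity):
 * a multi-block read such as CMD18 (READ_MULTIPLE_BLOCK, opcode 0x12) with
 * an R1 response is issued as
 *
 *	0x12 | RESP_R1 | DATA_PRESENT | TRANSFER_READ | TRANSFER_MULTI
 *	     = 0x3c12
 *
 * i.e. the opcode occupies the low bits and the response / transfer flags
 * are OR'ed on top.
 */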
static int tmio_mmc_start_command(struct tmio_mmc_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;
	u32 irq_mask = TMIO_MASK_CMD;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:
	case MMC_RSP_R1_NO_CRC:
			   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

	/*
	 * FIXME - this seems to be ok commented out but the spec suggests
	 * this bit should be set when issuing app commands.
	 *	if (cmd->flags & MMC_FLAG_ACMD)
	 *		c |= APP_CMD;
	 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_SEC);
			c |= TRANSFER_MULTI;

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED and
			 * SET_BLOCK_COUNT when doing multiple block transfer
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED || host->mrq->sbc))
				c |= NO_CMD12_ISSUE;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	if (!host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
	tmio_mmc_enable_mmc_irqs(host, irq_mask);

	/* Fire off the command */
	sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
				   unsigned short *buf,
				   unsigned int count)
{
	int is_read = host->data->flags & MMC_DATA_READ;
	u8 *buf8;

	/*
	 * Transfer the data
	 */
	if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
		u32 data = 0;
		u32 *buf32 = (u32 *)buf;

		if (is_read)
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32,
					   count >> 2);
		else
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32,
					    count >> 2);

		/* if count was a multiple of 4 */
		if (!(count & 0x3))
			return;

		buf32 += count >> 2;
		count %= 4;

		if (is_read) {
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1);
			memcpy(buf32, &data, count);
		} else {
			memcpy(&data, buf32, count);
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1);
		}

		return;
	}

	if (is_read)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was an even number */
	if (!(count & 0x1))
		return;

	/* if count was an odd number */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * FIXME
	 *
	 * The driver and this function assume little-endian usage.
	 */
	if (is_read)
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
	else
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}
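/*
 * Tail handling in tmio_mmc_transfer_data(), by example: on the 32-bit data
 * port a 7-byte transfer does one 32-bit rep access (count >> 2 == 1 word),
 * then one more 32-bit access whose payload is staged through a local u32 so
 * that only the remaining count %= 4 == 3 bytes touch the caller's buffer.
 * The 16-bit port works the same way with a single trailing byte. (This is a
 * restatement of the code above, not additional documented behaviour.)
 */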
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	tmio_mmc_transfer_data(host, buf, count);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}

static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);

		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}
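/*
 * Note on the bounce buffer: the DMA path (see the dma_ops implementations)
 * may substitute host->bounce_sg / host->bounce_buf for a scatterlist entry
 * it cannot hand to the DMA engine directly - presumably due to alignment or
 * length constraints. All this helper does is copy the received data back
 * into the original scatterlist once a read that took that detour completes.
 */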
/* Needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 * which we don't do, as the chip can auto-generate them.
	 * Perhaps we can be smarter about when to use auto CMD12 and
	 * only issue the auto request when we know this is the desired
	 * stop command, allowing fallback to the stop command the
	 * upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop && !host->mrq->sbc) {
		if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg)
			dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n",
				stop->opcode, stop->arg);

		/* fill in response from auto CMD12 */
		stop->resp[0] = sd_ctrl_read16_and_16_as_32(host, CTL_RESPONSE);

		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0);
	}

	schedule_work(&host->done);
}
EXPORT_SYMBOL_GPL(tmio_mmc_do_data_irq);

static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_data *data;

	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
	    stat & TMIO_STAT_TXUNDERRUN)
		data->error = -EILSEQ;
	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
		bool done = false;

		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt already comes with
		 * the BUSY status bit clear, but on some operations, like
		 * mount or at the beginning of a write / sync / umount, there
		 * is one DATAEND interrupt with the BUSY bit set. In these
		 * cases, waiting for one more interrupt fixes the problem.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_SCLKDIVEN)
				done = true;
		} else {
			if (!(status & TMIO_STAT_CMD_BUSY))
				done = true;
		}

		if (done) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_dataend_dma(host);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_dataend_dma(host);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
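/*
 * Response register layout, by example (inferred from the fixup in
 * tmio_mmc_cmd_irq() below): the raw CTL_RESPONSE reads are shifted by one
 * byte relative to what the MMC core expects, with the top 8 bits of the
 * first word being junk. For a 136-bit response, if the registers yield
 * resp[0] = 0xaabbccdd and resp[1] = 0x11223344, the fixup produces
 * resp[0] = (0xaabbccdd << 8) | (0x11223344 >> 24) = 0xbbccdd11, discarding
 * the 0xaa byte.
 */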
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	/*
	 * This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
		 stat & TMIO_STAT_STOPBIT_ERR ||
		 stat & TMIO_STAT_CMD_IDX_ERR)
		cmd->error = -EILSEQ;

	/*
	 * If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}

static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				       int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				      TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}

static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
				  int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CMDRESPEND |
				      TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host, status);
		return true;
	}

	return false;
}

static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;
	unsigned int sdio_status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
		sdio_status |= TMIO_SDIO_SETBITS_MASK;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);
}

irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
	ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	/* Clear the status except the interrupt status */
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);

	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	__tmio_mmc_sdio_irq(host);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(tmio_mmc_irq);
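/*
 * Summary of the dispatch above (a summary of the code, not of a datasheet):
 * tmio_mmc_irq() snapshots CTL_STATUS, masks it down to the unmasked events,
 * and tries the handlers in priority order - card detect first, then
 * command / PIO / data-end events, then SDIO. The first handler that claims
 * an event short-circuits the rest, and unclaimed interrupts still return
 * IRQ_HANDLED, so a stuck source will not be flagged as spurious by the
 * IRQ core.
 */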
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2-byte requests in 4/8-bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

static void tmio_mmc_hw_reset(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (host->hw_reset)
		host->hw_reset(host);
}

static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int i, ret = 0;

	if (!host->init_tuning || !host->select_tuning)
		/* Tuning is not supported */
		goto out;

	host->tap_num = host->init_tuning(host);
	if (!host->tap_num)
		/* Tuning is not supported */
		goto out;

	if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
		dev_warn_once(&host->pdev->dev,
			      "Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
		goto out;
	}

	bitmap_zero(host->taps, host->tap_num * 2);

	/* Issue CMD19 twice for each tap */
	for (i = 0; i < 2 * host->tap_num; i++) {
		if (host->prepare_tuning)
			host->prepare_tuning(host, i % host->tap_num);

		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (ret && ret != -EILSEQ)
			goto out;
		if (ret == 0)
			set_bit(i, host->taps);

		mdelay(1);
	}

	ret = host->select_tuning(host);

out:
	if (ret < 0) {
		dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
		tmio_mmc_hw_reset(mmc);
	}

	return ret;
}

static void tmio_process_mrq(struct tmio_mmc_host *host,
			     struct mmc_request *mrq)
{
	struct mmc_command *cmd;
	int ret;

	if (mrq->sbc && host->cmd != mrq->sbc) {
		cmd = mrq->sbc;
	} else {
		cmd = mrq->cmd;
		if (mrq->data) {
			ret = tmio_mmc_start_data(host, mrq->data);
			if (ret)
				goto fail;
		}
	}

	ret = tmio_mmc_start_command(host, cmd);
	if (ret)
		goto fail;

	schedule_delayed_work(&host->delayed_reset_work,
			      msecs_to_jiffies(CMDREQ_TIMEOUT));
	return;

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(host->mmc, mrq);
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_process_mrq(host, mrq);
}
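/*
 * Request lifecycle, as implemented above and below (a summary of this
 * file's flow): tmio_mmc_request() claims host->mrq and calls
 * tmio_process_mrq(), which issues either the SET_BLOCK_COUNT precursor or
 * the main command (plus data setup). Completion arrives via the IRQ
 * handlers, which schedule host->done; tmio_mmc_done_work() then runs
 * tmio_mmc_finish_request() to report back through mmc_request_done().
 * delayed_reset_work acts as a watchdog: if no interrupt arrives within
 * CMDREQ_TIMEOUT, tmio_mmc_reset_work() fails the request and resets the
 * controller.
 */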
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	/* If not SET_BLOCK_COUNT, clear old data */
	if (host->cmd != mrq->sbc) {
		host->cmd = NULL;
		host->data = NULL;
		host->force_pio = false;
		host->mrq = NULL;
	}

	cancel_delayed_work(&host->delayed_reset_work);

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	if (host->check_scc_error)
		host->check_scc_error(host);

	/* If SET_BLOCK_COUNT, continue with main command */
	if (host->mrq) {
		tmio_process_mrq(host, mrq);
		return;
	}

	mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}

static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
{
	if (!host->clk_enable)
		return -ENOTSUPP;

	return host->clk_enable(host);
}

static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
{
	if (host->clk_disable)
		host->clk_disable(host);
}

static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() returns void, so there is no chance to report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empirical value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card-insertion probing.
		 * 100us were not enough. Is this the same 140us delay, as in
		 * tmio_mmc_set_ios()?
		 */
		udelay(200);
	}
	/*
	 * It seems VccQ should be switched on after Vcc; this is also what
	 * the omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		udelay(200);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}

static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}

static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				   unsigned char bus_width)
{
	u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
				& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);

	/* reg now applies to MMC_BUS_WIDTH_4 */
	if (bus_width == MMC_BUS_WIDTH_1)
		reg |= CARD_OPT_WIDTH;
	else if (bus_width == MMC_BUS_WIDTH_8)
		reg |= CARD_OPT_WIDTH8;

	sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}
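/*
 * Bus width encoding in CTL_SD_MEM_CARD_OPT, as used above: clearing both
 * CARD_OPT_WIDTH and CARD_OPT_WIDTH8 selects 4-bit mode (the default after
 * the read-modify-write), setting CARD_OPT_WIDTH selects 1-bit mode, and
 * setting CARD_OPT_WIDTH8 selects 8-bit mode. In other words, the register
 * does not encode the width directly; 4-bit is the "no flags" state.
 */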
/*
 * Set MMC clock / power.
 * Note: This controller uses a simple divider scheme, therefore it cannot run
 * an MMC card at full speed (20 MHz). The max clock is 24 MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12 MHz, which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts,
				jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		tmio_mmc_power_off(host);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_UP:
		tmio_mmc_power_on(host, ios->vdd);
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	case MMC_POWER_ON:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	}

	/* Let things settle. Delay taken from the WinCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u\n",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	host->clk_cache = ios->clock;

	mutex_unlock(&host->ios_lock);
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	int ret = mmc_gpio_get_ro(mmc);

	if (ret >= 0)
		return ret;

	ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));

	return ret;
}

static int tmio_multi_io_quirk(struct mmc_card *card,
			       unsigned int direction, int blk_size)
{
	struct tmio_mmc_host *host = mmc_priv(card->host);

	if (host->multi_io_quirk)
		return host->multi_io_quirk(card, direction, blk_size);

	return blk_size;
}

static struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
	.multi_io_quirk	= tmio_multi_io_quirk,
	.hw_reset	= tmio_mmc_hw_reset,
	.execute_tuning = tmio_mmc_execute_tuning,
};
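/*
 * Note that tmio_mmc_ops is deliberately non-const: tmio_mmc_host_probe()
 * below patches .card_busy and .start_signal_voltage_switch into it from the
 * host instance. This implicitly assumes every probed instance supplies the
 * same callbacks, since the structure is shared by all hosts bound to this
 * driver.
 */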
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	/* use ocr_mask if no regulator */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = pdata->ocr_mask;

	/*
	 * Try again later, as there is a possibility that the regulator
	 * has not been probed yet.
	 */
	if (!mmc->ocr_avail)
		return -EPROBE_DEFER;

	return 0;
}

static void tmio_mmc_of_parse(struct platform_device *pdev,
			      struct tmio_mmc_data *pdata)
{
	const struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
		pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}

struct tmio_mmc_host*
tmio_mmc_host_alloc(struct platform_device *pdev)
{
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return NULL;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = pdev;

	return host;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_alloc);

void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
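/*
 * Expected usage by glue drivers (a summary of this file's exported API, not
 * a documented contract): call tmio_mmc_host_alloc(), fill in the optional
 * host callbacks (clk_enable, clk_update, write16_hook and friends), then
 * call tmio_mmc_host_probe() with the platform data and optional DMA ops.
 * tmio_mmc_host_free() undoes the allocation and pairs with
 * tmio_mmc_host_remove() on teardown.
 */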
int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
			struct tmio_mmc_data *pdata,
			const struct tmio_mmc_dma_ops *dma_ops)
{
	struct platform_device *pdev = _host->pdev;
	struct mmc_host *mmc = _host->mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	tmio_mmc_of_parse(pdev, pdata);

	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
		_host->write16_hook = NULL;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		return ret;

	_host->pdata = pdata;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	ret = tmio_mmc_init_ocr(_host);
	if (ret < 0)
		return ret;

	_host->ctl = devm_ioremap(&pdev->dev,
				  res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl)
		return -ENOMEM;

	tmio_mmc_ops.card_busy = _host->card_busy;
	tmio_mmc_ops.start_signal_voltage_switch =
		_host->start_signal_voltage_switch;
	mmc->ops = &tmio_mmc_ops;

	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 |= pdata->capabilities2;
	mmc->max_segs = pdata->max_segs ? : 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = pdata->max_blk_count ? :
		(PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  !mmc_card_is_removable(mmc));

	/*
	 * On Gen2+, eMMC with NONREMOVABLE currently fails because native
	 * hotplug gets disabled. It seems runtime PM related, yet we need
	 * further research. Since we are planning a PM overhaul anyway, for
	 * now keep the device active by always enabling native hotplug.
	 */
	if (pdata->flags & TMIO_MMC_MIN_RCAR2)
		_host->native_hotplug = true;

	if (tmio_mmc_clk_enable(_host) < 0) {
		mmc->f_max = pdata->hclk;
		mmc->f_min = mmc->f_max / 512;
	}

	/*
	 * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
	 * looping forever...
	 */
	if (mmc->f_min == 0)
		return -EINVAL;

	/*
	 * While using the internal tmio hardware logic for card detection, we
	 * need to ensure it stays powered for it to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	_host->sdio_irq_enabled = false;
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	if (!_host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	_host->sdcard_irq_mask &= ~irq_mask;

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	_host->dma_ops = dma_ops;
	tmio_mmc_request_dma(_host, pdata);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = mmc_add_host(mmc);
	if (ret < 0) {
		tmio_mmc_host_remove(_host);
		return ret;
	}

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
		if (ret < 0) {
			tmio_mmc_host_remove(_host);
			return ret;
		}
		mmc_gpiod_request_cd_irq(mmc);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);

void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;
	struct mmc_host *mmc = host->mmc;

	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

	if (!host->native_hotplug)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	tmio_mmc_clk_disable(host);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);

#ifdef CONFIG_PM
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	if (host->clk_cache)
		tmio_mmc_clk_stop(host);

	tmio_mmc_clk_disable(host);

	return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_suspend);
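/*
 * Runtime PM pairing: the suspend path above masks all SD card interrupts
 * and gates the clocks; the resume path below resets the controller,
 * restores the cached bus clock, re-enables DMA and, where tuning is in use,
 * re-selects the previously chosen tap. Controller state is therefore
 * assumed to be lost across a runtime suspend.
 */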
static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
{
	return host->tap_num && mmc_can_retune(host->mmc);
}

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_reset(host);
	tmio_mmc_clk_enable(host);

	if (host->clk_cache)
		tmio_mmc_set_clock(host, host->clk_cache);

	tmio_mmc_enable_dma(host, true);

	if (tmio_mmc_can_retune(host) && host->select_tuning(host))
		dev_warn(&host->pdev->dev, "Tuning selection failed\n");

	return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_resume);
#endif

MODULE_LICENSE("GPL v2");