// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	if (host->ops->dump_vendor_regs)
		host->ops->dump_vendor_regs(host);

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
	u16 ctrl2;

	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ctrl2 & SDHCI_CTRL_V4_MODE)
		return;

	ctrl2 |= SDHCI_CTRL_V4_MODE;
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}

/*
 * This can be called before sdhci_add_host() by a vendor's host controller
 * driver to enable v4 mode if supported.
 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
			  SDHCI_CARD_PRESENT;

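		/*
		 * Watch for the event that can actually happen next: if a
		 * card is currently present, enable the removal interrupt,
		 * otherwise enable the insertion interrupt.
		 */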
		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(mmc_dev(host->mmc));
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(mmc_dev(host->mmc));
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
			       mmc_hostname(host->mmc), (int)mask);
			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static bool sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return false;
	}

	host->ops->reset(host, mask);

	return true;
}

static void sdhci_reset_for_all(struct sdhci_host *host)
{
	if (sdhci_do_reset(host, SDHCI_RESET_ALL)) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}
		/* Resetting the controller clears many settings, e.g. preset enable */
		host->preset_enabled = false;
	}
}

enum sdhci_reset_reason {
	SDHCI_RESET_FOR_INIT,
	SDHCI_RESET_FOR_REQUEST_ERROR,
	SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY,
	SDHCI_RESET_FOR_TUNING_ABORT,
	SDHCI_RESET_FOR_CARD_REMOVED,
	SDHCI_RESET_FOR_CQE_RECOVERY,
};

static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason)
{
	switch (reason) {
	case SDHCI_RESET_FOR_INIT:
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		break;
	case SDHCI_RESET_FOR_REQUEST_ERROR:
	case SDHCI_RESET_FOR_TUNING_ABORT:
	case SDHCI_RESET_FOR_CARD_REMOVED:
	case SDHCI_RESET_FOR_CQE_RECOVERY:
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
		break;
	case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY:
		sdhci_do_reset(host, SDHCI_RESET_DATA);
		break;
	}
}

#define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r)

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX |
		    SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_config_dma(struct sdhci_host *host)
{
	u8 ctrl;
	u16 ctrl2;

	if (host->version < SDHCI_SPEC_200)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (!(host->flags & SDHCI_REQ_USE_DMA))
		goto out;

	/* Note if DMA Select is zero then SDMA is selected */
	if (host->flags & SDHCI_USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;

	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		/*
		 * If v4 mode, all supported DMA can be 64-bit addressing if
		 * controller supports 64-bit system address, otherwise only
		 * ADMA can support 64-bit addressing.
		 */
		if (host->v4_mode) {
			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
		} else if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
			 * set SDHCI_CTRL_ADMA64.
			 */
			ctrl |= SDHCI_CTRL_ADMA64;
		}
	}

out:
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (soft)
		sdhci_reset_for(host, INIT);
	else
		sdhci_reset_for_all(host);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	spin_lock_irqsave(&host->lock, flags);
	sdhci_set_default_irqs(host);
	spin_unlock_irqrestore(&host->lock, flags);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		host->reinit_uhs = true;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);

	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);

	/*
	 * A change to the card detect bits indicates a change in present
	 * state, see sdhci_set_card_detection(). A card detect interrupt
	 * might have been missed while the host controller was being reset,
	 * so trigger a rescan to check.
	 */
	if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return 0;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);
}
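
/*
 * Note: each 32-bit word read from SDHCI_BUFFER above is consumed
 * least-significant byte first; sdhci_write_block_pio() below packs
 * bytes into 'scratch' in the same order, so both directions treat
 * the data port as a little-endian word stream.
 */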
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			if (host->ops->copy_to_bounce_buffer) {
				host->ops->copy_to_bounce_buffer(host,
								 data, length);
			} else {
				sg_copy_to_buffer(data->sg, data->sg_len,
						  host->bounce_buffer, length);
			}
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(mmc_dev(host->mmc),
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}
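
/*
 * A request reaches sdhci_pre_dma_transfer() with host_cookie set to
 * COOKIE_PRE_MAPPED when its buffers were mapped ahead of time (e.g. via
 * the mmc host's pre_req hook); the early return above then reuses the
 * cached sg_count instead of mapping again.
 */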
static char *sdhci_kmap_atomic(struct scatterlist *sg)
{
	return kmap_local_page(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer)
{
	kunmap_local(buffer);
}

void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
			   dma_addr_t addr, int len, unsigned int cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = *desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));

	*desc += host->desc_sz;
}
EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);

static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
					   void **desc, dma_addr_t addr,
					   int len, unsigned int cmd)
{
	if (host->ops->adma_write_desc)
		host->ops->adma_write_desc(host, desc, addr, len, cmd);
	else
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
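
/*
 * Illustrative sketch only: with the descriptor helpers above, a single
 * 32-bit-aligned 512-byte buffer at DMA address 0x10000000 would be
 * described by a two-entry ADMA2 table along the lines of:
 *
 *   { .cmd = ADMA2_TRAN_VALID,    .len = 512, .addr_lo = 0x10000000 }
 *   { .cmd = ADMA2_NOP_END_VALID, .len = 0,   .addr_lo = 0 }
 *
 * sdhci_adma_table_pre() below builds such tables, inserting extra
 * bounce-buffer descriptors for any unaligned leading bytes.
 */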
static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer);
			}

			/* tran, valid */
			__sdhci_adma_write_desc(host, &desc, align_addr,
						offset, ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			addr += offset;
			len -= offset;
		}

		/*
		 * The block layer forces a minimum segment size of PAGE_SIZE,
		 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
		 * multiple descriptors, noting that the ADMA table is sized
		 * for 4KiB chunks anyway, so it will be big enough.
		 */
		while (len > host->max_adma) {
			int n = 32 * 1024; /* 32 KiB */

			__sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
			addr += n;
			len -= n;
		}

		/* tran, valid */
		if (len)
			__sdhci_adma_write_desc(host, &desc, addr, len,
						ADMA2_TRAN_VALID);

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
}

static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	if (host->v4_mode)
		sdhci_set_adma_addr(host, addr);
	else
		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
}

static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}
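
/*
 * Worked example with hypothetical numbers: data->timeout_ns = 100000000
 * (100 ms) yields 100000 us; with data->timeout_clks = 1000 and
 * host->clock = 50000000 (50 MHz), the clock-cycle term adds
 * 1000000 * 1000 / 50000000 = 20 us, giving target_timeout = 100020 us.
 */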
static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data;
	unsigned target_timeout, current_timeout;

	*too_big = false;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use the maximum. The hardware may
	 * take longer to time out, but that's much better than having a
	 * too-short timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return host->max_timeout_count;

	/* Unspecified command, assume max */
	if (cmd == NULL)
		return host->max_timeout_count;

	data = cmd->data;
	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return host->max_timeout_count;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count > host->max_timeout_count) {
			if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
				DBG("Too large timeout 0x%x requested for CMD%d!\n",
				    count, cmd->opcode);
			count = host->max_timeout_count;
			*too_big = true;
			break;
		}
	}

	return count;
}
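
/*
 * Worked example with hypothetical numbers: with host->timeout_clk = 1000
 * (i.e. 1 MHz), the minimum timeout is (1 << 13) * 1000 / 1000 = 8192 us.
 * A target of 100020 us then needs four doublings (8192 -> 16384 -> 32768
 * -> 65536 -> 131072), so count = 4, corresponding to a hardware timeout
 * of TMCLK * 2^(count + 13).
 */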
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
	else
		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);

void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	bool too_big = false;
	u8 count = sdhci_calc_timeout(host, cmd, &too_big);

	if (too_big &&
	    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
		sdhci_calc_sw_timeout(host, cmd);
		sdhci_set_data_timeout_irq(host, false);
	} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
		sdhci_set_data_timeout_irq(host, true);
	}

	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
}
EXPORT_SYMBOL_GPL(__sdhci_set_timeout);

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	if (host->ops->set_timeout)
		host->ops->set_timeout(host, cmd);
	else
		__sdhci_set_timeout(host, cmd);
}

static void sdhci_initialize_data(struct sdhci_host *host,
				  struct mmc_data *data)
{
	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;
}

static inline void sdhci_set_block_info(struct sdhci_host *host,
					struct mmc_data *data)
{
	/* Set the DMA boundary value and block size */
	sdhci_writew(host,
		     SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	/*
	 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
	 * can be supported, in that case 16-bit block count register must be 0.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
	} else {
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);
			sdhci_set_adma_addr(host, host->adma_addr);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
		}
	}

	sdhci_config_dma(host);

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}

#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)

static int sdhci_external_dma_init(struct sdhci_host *host)
{
	int ret = 0;
	struct mmc_host *mmc = host->mmc;

	host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx");
	if (IS_ERR(host->tx_chan)) {
		ret = PTR_ERR(host->tx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request TX DMA channel.\n");
		host->tx_chan = NULL;
		return ret;
	}

	host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx");
	if (IS_ERR(host->rx_chan)) {
		if (host->tx_chan) {
			dma_release_channel(host->tx_chan);
			host->tx_chan = NULL;
		}

		ret = PTR_ERR(host->rx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request RX DMA channel.\n");
		host->rx_chan = NULL;
	}

	return ret;
}

static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
						   struct mmc_data *data)
{
	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
}

static int sdhci_external_dma_setup(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	int ret, i;
	enum dma_transfer_direction dir;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = cmd->data;
	struct dma_chan *chan;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;
	int sg_cnt;

	if (!host->mapbase)
		return -EINVAL;

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = host->mapbase + SDHCI_BUFFER;
	cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = data->blksz / 4;
	cfg.dst_maxburst = data->blksz / 4;
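	/*
	 * Both directions target the 32-bit SDHCI_BUFFER data port, so the
	 * maxburst values above are expressed in units of the 4-byte
	 * register width; data->blksz / 4 thus requests one burst per block.
	 */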

	/* Sanity check: all the SG entries must be aligned by block size. */
	for (i = 0; i < data->sg_len; i++) {
		if ((data->sg + i)->length % data->blksz)
			return -EINVAL;
	}

	chan = sdhci_external_dma_channel(host, data);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_cnt <= 0)
		return -EINVAL;

	dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	desc->callback = NULL;
	desc->callback_param = NULL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		ret = cookie;

	return ret;
}

static void sdhci_external_dma_release(struct sdhci_host *host)
{
	if (host->tx_chan) {
		dma_release_channel(host->tx_chan);
		host->tx_chan = NULL;
	}

	if (host->rx_chan) {
		dma_release_channel(host->rx_chan);
		host->rx_chan = NULL;
	}

	sdhci_switch_external_dma(host, false);
}

static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
					      struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	host->flags |= SDHCI_REQ_USE_DMA;
	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}

static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	if (!sdhci_external_dma_setup(host, cmd)) {
		__sdhci_external_dma_prepare_data(host, cmd);
	} else {
		sdhci_external_dma_release(host);
		pr_err("%s: Cannot use external DMA, switching to the standard SDHCI DMA/PIO.\n",
		       mmc_hostname(host->mmc));
		sdhci_prepare_data(host, cmd);
	}
}

static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	struct dma_chan *chan;

	if (!cmd->data)
		return;

	chan = sdhci_external_dma_channel(host, cmd->data);
	if (chan)
		dma_async_issue_pending(chan);
}

#else

static inline int sdhci_external_dma_init(struct sdhci_host *host)
{
	return -EOPNOTSUPP;
}

static inline void sdhci_external_dma_release(struct sdhci_host *host)
{
}

static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
	/* This should never happen */
	WARN_ON_ONCE(1);
}

static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
}

static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
							  struct mmc_data *data)
{
	return NULL;
}

#endif

void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
{
	host->use_external_dma = en;
}
EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
}

static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
				      struct mmc_request *mrq)
{
	return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
}

static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 u16 *mode)
{
	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
			 (cmd->opcode != SD_IO_RW_EXTENDED);
	bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
	u16 ctrl2;

	/*
	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
	 * Select' is recommended rather than use of 'Auto CMD12
	 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
	 * here because some controllers (e.g. sdhci-of-dwmshc) expect it.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (use_cmd12 || use_cmd23)) {
		*mode |= SDHCI_TRNS_AUTO_SEL;

		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (use_cmd23)
			ctrl2 |= SDHCI_CMD23_ENABLE;
		else
			ctrl2 &= ~SDHCI_CMD23_ENABLE;
		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);

		return;
	}

	/*
	 * If we are sending CMD23, CMD12 never gets sent
	 * on successful completion (so no Auto-CMD12).
	 */
	if (use_cmd12)
		*mode |= SDHCI_TRNS_AUTO_CMD12;
	else if (use_cmd23)
		*mode |= SDHCI_TRNS_AUTO_CMD23;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
		    SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			/* must not clear SDHCI_TRANSFER_MODE when tuning */
			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				     SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		sdhci_auto_cmd_select(host, cmd, &mode);
		if (sdhci_auto_cmd23(host, cmd->mrq))
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
		host->deferred_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	sdhci_set_mrq_done(host, mrq);

	sdhci_del_timer(host, mrq);

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	__sdhci_finish_mrq(host, mrq);

	queue_work(host->complete_wq, &host->complete_work);
}

static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	/*
	 * The controller needs a reset of internal state machines upon error
	 * conditions.
	 */
	if (data->error) {
		if (!host->cmd || host->cmd == data_cmd)
			sdhci_reset_for(host, REQUEST_ERROR);
		else
			sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY);
	}

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
	     data->error)) {
		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			__sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			if (!sdhci_send_command(host, data->stop)) {
				if (sw_data_timeout) {
					/*
					 * This is anyway a sw data timeout, so
					 * give up now.
					 */
					data->stop->error = -EIO;
					__sdhci_finish_mrq(host, data->mrq);
				} else {
					WARN_ON(host->deferred_cmd);
					host->deferred_cmd = data->stop;
				}
			}
		}
	} else {
		__sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	__sdhci_finish_data(host, false);
}

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
		return false;

	host->cmd = cmd;
	host->data_timeout = 0;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
		sdhci_set_timeout(host, cmd);
	}

	if (cmd->data) {
		if (host->use_external_dma)
			sdhci_external_dma_prepare_data(host, cmd);
		else
			sdhci_prepare_data(host, cmd);
	}

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		WARN_ONCE(1, "Unsupported response type!\n");
		/*
		 * This does not happen in practice because 136-bit response
		 * commands never have busy waiting, so rather than complicate
		 * the error path, just remove busy waiting and continue.
		 */
		cmd->flags &= ~MMC_RSP_BUSY;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	if (host->use_external_dma)
		sdhci_external_dma_pre_transfer(host, cmd);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);

	return true;
}
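
/*
 * Illustrative only: for CMD17 (READ_SINGLE_BLOCK, an R1 response with CRC
 * and index checking plus data), the flags computed above come out as
 * SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC | SDHCI_CMD_INDEX | SDHCI_CMD_DATA,
 * and SDHCI_MAKE_CMD() packs the opcode into bits 13:8 of the command
 * register with these flags in the low byte.
 */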
static bool sdhci_present_error(struct sdhci_host *host,
				struct mmc_command *cmd, bool present)
{
	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		cmd->error = -ENOMEDIUM;
		return true;
	}

	return false;
}

static bool sdhci_send_command_retry(struct sdhci_host *host,
				     struct mmc_command *cmd,
				     unsigned long flags)
	__releases(host->lock)
	__acquires(host->lock)
{
	struct mmc_command *deferred_cmd = host->deferred_cmd;
	int timeout = 10; /* Approx. 10 ms */
	bool present;

	while (!sdhci_send_command(host, cmd)) {
		if (!timeout--) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			return false;
		}

		spin_unlock_irqrestore(&host->lock, flags);

		usleep_range(1000, 1250);

		present = host->mmc->ops->get_cd(host->mmc);

		spin_lock_irqsave(&host->lock, flags);

		/* A deferred command might disappear, handle that */
		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
			return true;

		if (sdhci_present_error(host, cmd, present))
			return false;
	}

	if (cmd == host->deferred_cmd)
		host->deferred_cmd = NULL;

	return true;
}

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}
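
/*
 * Illustrative only: when the controller strips the R2 CRC byte, the raw
 * registers hold the response bits right-justified. The shift above moves
 * each word up by 8 bits and pulls in the top byte of the next one, e.g.
 * raw words {0x00112233, 0x44556677, ...} become {0x11223344, 0x55667788,
 * ...}, leaving the stripped CRC position at the bottom of resp[3] as zero.
 */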
static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		if (!sdhci_send_command(host, cmd->mrq->cmd)) {
			WARN_ON(host->deferred_cmd);
			host->deferred_cmd = cmd->mrq->cmd;
		}
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			__sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_MMC_HS:
	case MMC_TIMING_SD_HS:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
		break;
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
			if (host->clk_mul &&
			    (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
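
/*
 * Worked example with hypothetical numbers: on a v3.00 host with
 * host->max_clk = 200 MHz and no programmable clock, a request for 50 MHz
 * scans the even divisors and stops at div = 4 (200 MHz / 4 = 50 MHz), so
 * real_div = 4, the divider field is written as div >> 1 = 2, and
 * *actual_clock becomes 50 MHz. With SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
 * a computed divider of zero on a host at or below 25 MHz is bumped to 1.
 */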
void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 150 ms */
	timeout = ktime_add_ms(ktime_get(), 150);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		if (clk & SDHCI_CLOCK_INT_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
		clk |= SDHCI_CLOCK_PLL_EN;
		clk &= ~SDHCI_CLOCK_INT_STABLE;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		/* Wait max 150 ms */
		timeout = ktime_add_ms(ktime_get(), 150);
		while (1) {
			bool timedout = ktime_after(ktime_get(), timeout);

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			if (clk & SDHCI_CLOCK_INT_STABLE)
				break;
			if (timedout) {
				pr_err("%s: PLL clock never stabilised.\n",
				       mmc_hostname(host->mmc));
				sdhci_err_stats_inc(host, CTRL_TIMEOUT);
				sdhci_dumpregs(host);
				return;
			}
			udelay(10);
		}
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, 2058 unsigned short vdd) 2059 { 2060 u8 pwr = 0; 2061 2062 if (mode != MMC_POWER_OFF) { 2063 switch (1 << vdd) { 2064 case MMC_VDD_165_195: 2065 /* 2066 * Without a regulator, SDHCI does not support 2.0v 2067 * so we only get here if the driver deliberately 2068 * added the 2.0v range to ocr_avail. Map it to 1.8v 2069 * for the purpose of turning on the power. 2070 */ 2071 case MMC_VDD_20_21: 2072 pwr = SDHCI_POWER_180; 2073 break; 2074 case MMC_VDD_29_30: 2075 case MMC_VDD_30_31: 2076 pwr = SDHCI_POWER_300; 2077 break; 2078 case MMC_VDD_32_33: 2079 case MMC_VDD_33_34: 2080 /* 2081 * 3.4 ~ 3.6V are valid only for those platforms where it's 2082 * known that the voltage range is supported by hardware. 2083 */ 2084 case MMC_VDD_34_35: 2085 case MMC_VDD_35_36: 2086 pwr = SDHCI_POWER_330; 2087 break; 2088 default: 2089 WARN(1, "%s: Invalid vdd %#x\n", 2090 mmc_hostname(host->mmc), vdd); 2091 break; 2092 } 2093 } 2094 2095 if (host->pwr == pwr) 2096 return; 2097 2098 host->pwr = pwr; 2099 2100 if (pwr == 0) { 2101 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2102 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2103 sdhci_runtime_pm_bus_off(host); 2104 } else { 2105 /* 2106 * Spec says that we should clear the power reg before setting 2107 * a new value. Some controllers don't seem to like this though. 2108 */ 2109 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 2110 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2111 2112 /* 2113 * At least the Marvell CaFe chip gets confused if we set the 2114 * voltage and turn on the power at the same time, so set the 2115 * voltage first. 2116 */ 2117 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) 2118 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2119 2120 pwr |= SDHCI_POWER_ON; 2121 2122 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2123 2124 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2125 sdhci_runtime_pm_bus_on(host); 2126 2127 /* 2128 * Some controllers need an extra delay of 10ms before 2129 * they can apply the clock after applying power. 2130 */ 2131 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 2132 mdelay(10); 2133 } 2134 } 2135 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg); 2136 2137 void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 2138 unsigned short vdd) 2139 { 2140 if (IS_ERR(host->mmc->supply.vmmc)) 2141 sdhci_set_power_noreg(host, mode, vdd); 2142 else 2143 sdhci_set_power_reg(host, mode, vdd); 2144 } 2145 EXPORT_SYMBOL_GPL(sdhci_set_power); 2146 2147 /* 2148 * Some controllers need to configure a valid bus voltage on their power 2149 * register regardless of whether an external regulator is taking care of power 2150 * supply. This helper function takes care of it if set as the controller's 2151 * sdhci_ops.set_power callback.
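* For example, a platform glue driver might wire it up like this (a minimal sketch; the "foo" driver name is hypothetical, the callback and the ops fields are real): static const struct sdhci_ops foo_sdhci_ops = { .set_clock = sdhci_set_clock, .set_power = sdhci_set_power_and_bus_voltage, };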
2152 */ 2153 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host, 2154 unsigned char mode, 2155 unsigned short vdd) 2156 { 2157 if (!IS_ERR(host->mmc->supply.vmmc)) { 2158 struct mmc_host *mmc = host->mmc; 2159 2160 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2161 } 2162 sdhci_set_power_noreg(host, mode, vdd); 2163 } 2164 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage); 2165 2166 /*****************************************************************************\ 2167 * * 2168 * MMC callbacks * 2169 * * 2170 \*****************************************************************************/ 2171 2172 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 2173 { 2174 struct sdhci_host *host = mmc_priv(mmc); 2175 struct mmc_command *cmd; 2176 unsigned long flags; 2177 bool present; 2178 2179 /* Firstly check card presence */ 2180 present = mmc->ops->get_cd(mmc); 2181 2182 spin_lock_irqsave(&host->lock, flags); 2183 2184 sdhci_led_activate(host); 2185 2186 if (sdhci_present_error(host, mrq->cmd, present)) 2187 goto out_finish; 2188 2189 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2190 2191 if (!sdhci_send_command_retry(host, cmd, flags)) 2192 goto out_finish; 2193 2194 spin_unlock_irqrestore(&host->lock, flags); 2195 2196 return; 2197 2198 out_finish: 2199 sdhci_finish_mrq(host, mrq); 2200 spin_unlock_irqrestore(&host->lock, flags); 2201 } 2202 EXPORT_SYMBOL_GPL(sdhci_request); 2203 2204 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq) 2205 { 2206 struct sdhci_host *host = mmc_priv(mmc); 2207 struct mmc_command *cmd; 2208 unsigned long flags; 2209 int ret = 0; 2210 2211 spin_lock_irqsave(&host->lock, flags); 2212 2213 if (sdhci_present_error(host, mrq->cmd, true)) { 2214 sdhci_finish_mrq(host, mrq); 2215 goto out_finish; 2216 } 2217 2218 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2219 2220 /* 2221 * The HSQ may send a command in interrupt context without polling 2222 * the busy signaling, which means we should return BUSY if the 2223 * controller has not released the inhibit bits, so that the HSQ can 2224 * retry the request in non-atomic context. Hence we should not 2225 * finish this request here.
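* A caller is therefore expected to treat -EBUSY as transient and retry from non-atomic context, roughly (a sketch, assuming the host's request_atomic op points here): if (mmc->ops->request_atomic(mmc, mrq) == -EBUSY) ... defer the request to the non-atomic path ...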
2226 */ 2227 if (!sdhci_send_command(host, cmd)) 2228 ret = -EBUSY; 2229 else 2230 sdhci_led_activate(host); 2231 2232 out_finish: 2233 spin_unlock_irqrestore(&host->lock, flags); 2234 return ret; 2235 } 2236 EXPORT_SYMBOL_GPL(sdhci_request_atomic); 2237 2238 void sdhci_set_bus_width(struct sdhci_host *host, int width) 2239 { 2240 u8 ctrl; 2241 2242 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2243 if (width == MMC_BUS_WIDTH_8) { 2244 ctrl &= ~SDHCI_CTRL_4BITBUS; 2245 ctrl |= SDHCI_CTRL_8BITBUS; 2246 } else { 2247 if (host->mmc->caps & MMC_CAP_8_BIT_DATA) 2248 ctrl &= ~SDHCI_CTRL_8BITBUS; 2249 if (width == MMC_BUS_WIDTH_4) 2250 ctrl |= SDHCI_CTRL_4BITBUS; 2251 else 2252 ctrl &= ~SDHCI_CTRL_4BITBUS; 2253 } 2254 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2255 } 2256 EXPORT_SYMBOL_GPL(sdhci_set_bus_width); 2257 2258 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 2259 { 2260 u16 ctrl_2; 2261 2262 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2263 /* Select Bus Speed Mode for host */ 2264 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 2265 if ((timing == MMC_TIMING_MMC_HS200) || 2266 (timing == MMC_TIMING_UHS_SDR104)) 2267 ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 2268 else if (timing == MMC_TIMING_UHS_SDR12) 2269 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 2270 else if (timing == MMC_TIMING_UHS_SDR25) 2271 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 2272 else if (timing == MMC_TIMING_UHS_SDR50) 2273 ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 2274 else if ((timing == MMC_TIMING_UHS_DDR50) || 2275 (timing == MMC_TIMING_MMC_DDR52)) 2276 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 2277 else if (timing == MMC_TIMING_MMC_HS400) 2278 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ 2279 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2280 } 2281 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 2282 2283 static bool sdhci_timing_has_preset(unsigned char timing) 2284 { 2285 switch (timing) { 2286 case MMC_TIMING_UHS_SDR12: 2287 case MMC_TIMING_UHS_SDR25: 2288 case MMC_TIMING_UHS_SDR50: 2289 case MMC_TIMING_UHS_SDR104: 2290 case MMC_TIMING_UHS_DDR50: 2291 case MMC_TIMING_MMC_DDR52: 2292 return true; 2293 } 2294 return false; 2295 } 2296 2297 static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing) 2298 { 2299 return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && 2300 sdhci_timing_has_preset(timing); 2301 } 2302 2303 static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios) 2304 { 2305 /* 2306 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK 2307 * Frequency. Check if preset values need to be enabled, or the Driver 2308 * Strength needs updating. Note, clock changes are handled separately. 2309 */ 2310 return !host->preset_enabled && 2311 (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type); 2312 } 2313 2314 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 2315 { 2316 struct sdhci_host *host = mmc_priv(mmc); 2317 bool reinit_uhs = host->reinit_uhs; 2318 bool turning_on_clk = false; 2319 u8 ctrl; 2320 2321 host->reinit_uhs = false; 2322 2323 if (ios->power_mode == MMC_POWER_UNDEFINED) 2324 return; 2325 2326 if (host->flags & SDHCI_DEVICE_DEAD) { 2327 if (!IS_ERR(mmc->supply.vmmc) && 2328 ios->power_mode == MMC_POWER_OFF) 2329 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 2330 return; 2331 } 2332 2333 /* 2334 * Reset the chip on each power off. 2335 * Should clear out any weird states.
2336 */ 2337 if (ios->power_mode == MMC_POWER_OFF) { 2338 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 2339 sdhci_reinit(host); 2340 } 2341 2342 if (host->version >= SDHCI_SPEC_300 && 2343 (ios->power_mode == MMC_POWER_UP) && 2344 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) 2345 sdhci_enable_preset_value(host, false); 2346 2347 if (!ios->clock || ios->clock != host->clock) { 2348 turning_on_clk = ios->clock && !host->clock; 2349 2350 host->ops->set_clock(host, ios->clock); 2351 host->clock = ios->clock; 2352 2353 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && 2354 host->clock) { 2355 host->timeout_clk = mmc->actual_clock ? 2356 mmc->actual_clock / 1000 : 2357 host->clock / 1000; 2358 mmc->max_busy_timeout = 2359 host->ops->get_max_timeout_count ? 2360 host->ops->get_max_timeout_count(host) : 2361 1 << 27; 2362 mmc->max_busy_timeout /= host->timeout_clk; 2363 } 2364 } 2365 2366 if (host->ops->set_power) 2367 host->ops->set_power(host, ios->power_mode, ios->vdd); 2368 else 2369 sdhci_set_power(host, ios->power_mode, ios->vdd); 2370 2371 if (host->ops->platform_send_init_74_clocks) 2372 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 2373 2374 host->ops->set_bus_width(host, ios->bus_width); 2375 2376 /* 2377 * Special case to avoid multiple clock changes during voltage 2378 * switching. 2379 */ 2380 if (!reinit_uhs && 2381 turning_on_clk && 2382 host->timing == ios->timing && 2383 host->version >= SDHCI_SPEC_300 && 2384 !sdhci_presetable_values_change(host, ios)) 2385 return; 2386 2387 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2388 2389 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) { 2390 if (ios->timing == MMC_TIMING_SD_HS || 2391 ios->timing == MMC_TIMING_MMC_HS || 2392 ios->timing == MMC_TIMING_MMC_HS400 || 2393 ios->timing == MMC_TIMING_MMC_HS200 || 2394 ios->timing == MMC_TIMING_MMC_DDR52 || 2395 ios->timing == MMC_TIMING_UHS_SDR50 || 2396 ios->timing == MMC_TIMING_UHS_SDR104 || 2397 ios->timing == MMC_TIMING_UHS_DDR50 || 2398 ios->timing == MMC_TIMING_UHS_SDR25) 2399 ctrl |= SDHCI_CTRL_HISPD; 2400 else 2401 ctrl &= ~SDHCI_CTRL_HISPD; 2402 } 2403 2404 if (host->version >= SDHCI_SPEC_300) { 2405 u16 clk, ctrl_2; 2406 2407 if (!host->preset_enabled) { 2408 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2409 /* 2410 * We only need to set Driver Strength if the 2411 * preset value enable is not set. 2412 */ 2413 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2414 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; 2415 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) 2416 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; 2417 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B) 2418 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2419 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) 2420 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; 2421 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D) 2422 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D; 2423 else { 2424 pr_warn("%s: invalid driver type, default to driver type B\n", 2425 mmc_hostname(mmc)); 2426 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2427 } 2428 2429 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2430 host->drv_type = ios->drv_type; 2431 } else { 2432 /* 2433 * According to SDHC Spec v3.00, if the Preset Value 2434 * Enable in the Host Control 2 register is set, we 2435 * need to reset SD Clock Enable before changing High 2436 * Speed Enable to avoid generating clock glitches.
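* The sequence used below is therefore: gate SDCLK by clearing SDHCI_CLOCK_CARD_EN, update the Host Control register, then re-enable the clock via host->ops->set_clock().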
2437 */ 2438 2439 /* Reset SD Clock Enable */ 2440 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2441 clk &= ~SDHCI_CLOCK_CARD_EN; 2442 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2443 2444 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2445 2446 /* Re-enable SD Clock */ 2447 host->ops->set_clock(host, host->clock); 2448 } 2449 2450 /* Reset SD Clock Enable */ 2451 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2452 clk &= ~SDHCI_CLOCK_CARD_EN; 2453 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2454 2455 host->ops->set_uhs_signaling(host, ios->timing); 2456 host->timing = ios->timing; 2457 2458 if (sdhci_preset_needed(host, ios->timing)) { 2459 u16 preset; 2460 2461 sdhci_enable_preset_value(host, true); 2462 preset = sdhci_get_preset_value(host); 2463 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK, 2464 preset); 2465 host->drv_type = ios->drv_type; 2466 } 2467 2468 /* Re-enable SD Clock */ 2469 host->ops->set_clock(host, host->clock); 2470 } else 2471 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2472 } 2473 EXPORT_SYMBOL_GPL(sdhci_set_ios); 2474 2475 static int sdhci_get_cd(struct mmc_host *mmc) 2476 { 2477 struct sdhci_host *host = mmc_priv(mmc); 2478 int gpio_cd = mmc_gpio_get_cd(mmc); 2479 2480 if (host->flags & SDHCI_DEVICE_DEAD) 2481 return 0; 2482 2483 /* If nonremovable, assume that the card is always present. */ 2484 if (!mmc_card_is_removable(mmc)) 2485 return 1; 2486 2487 /* 2488 * Try slot GPIO detect; if defined, it takes precedence 2489 * over the built-in controller functionality. 2490 */ 2491 if (gpio_cd >= 0) 2492 return !!gpio_cd; 2493 2494 /* If polling, assume that the card is always present. */ 2495 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2496 return 1; 2497 2498 /* Host native card detect */ 2499 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2500 } 2501 2502 int sdhci_get_cd_nogpio(struct mmc_host *mmc) 2503 { 2504 struct sdhci_host *host = mmc_priv(mmc); 2505 unsigned long flags; 2506 int ret = 0; 2507 2508 spin_lock_irqsave(&host->lock, flags); 2509 2510 if (host->flags & SDHCI_DEVICE_DEAD) 2511 goto out; 2512 2513 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2514 out: 2515 spin_unlock_irqrestore(&host->lock, flags); 2516 2517 return ret; 2518 } 2519 EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio); 2520 2521 static int sdhci_check_ro(struct sdhci_host *host) 2522 { 2523 unsigned long flags; 2524 int is_readonly; 2525 2526 spin_lock_irqsave(&host->lock, flags); 2527 2528 if (host->flags & SDHCI_DEVICE_DEAD) 2529 is_readonly = 0; 2530 else if (host->ops->get_ro) 2531 is_readonly = host->ops->get_ro(host); 2532 else if (mmc_can_gpio_ro(host->mmc)) 2533 is_readonly = mmc_gpio_get_ro(host->mmc); 2534 else 2535 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) 2536 & SDHCI_WRITE_PROTECT); 2537 2538 spin_unlock_irqrestore(&host->lock, flags); 2539 2540 /* This quirk needs to be replaced by a callback function later */ 2541 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2542 !is_readonly : is_readonly; 2543 } 2544 2545 #define SAMPLE_COUNT 5 2546 2547 static int sdhci_get_ro(struct mmc_host *mmc) 2548 { 2549 struct sdhci_host *host = mmc_priv(mmc); 2550 int i, ro_count; 2551 2552 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) 2553 return sdhci_check_ro(host); 2554 2555 ro_count = 0; 2556 for (i = 0; i < SAMPLE_COUNT; i++) { 2557 if (sdhci_check_ro(host)) { 2558 if (++ro_count > SAMPLE_COUNT / 2) 2559 return 1; 2560 } 2561 msleep(30); 2562 } 2563 return 0; 2564 } 2565 2566 static void sdhci_hw_reset(struct mmc_host *mmc) 2567 { 2568 struct sdhci_host *host = mmc_priv(mmc); 2569 2570 if (host->ops && host->ops->hw_reset) 2571 host->ops->hw_reset(host); 2572 } 2573 2574 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 2575 { 2576 if (!(host->flags & SDHCI_DEVICE_DEAD)) { 2577 if (enable) 2578 host->ier |= SDHCI_INT_CARD_INT; 2579 else 2580 host->ier &= ~SDHCI_INT_CARD_INT; 2581 2582 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2583 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2584 } 2585 } 2586 2587 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 2588 { 2589 struct sdhci_host *host = mmc_priv(mmc); 2590 unsigned long flags; 2591 2592 if (enable) 2593 pm_runtime_get_noresume(mmc_dev(mmc)); 2594 2595 spin_lock_irqsave(&host->lock, flags); 2596 sdhci_enable_sdio_irq_nolock(host, enable); 2597 spin_unlock_irqrestore(&host->lock, flags); 2598 2599 if (!enable) 2600 pm_runtime_put_noidle(mmc_dev(mmc)); 2601 } 2602 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2603 2604 static void sdhci_ack_sdio_irq(struct mmc_host *mmc) 2605 { 2606 struct sdhci_host *host = mmc_priv(mmc); 2607 unsigned long flags; 2608 2609 spin_lock_irqsave(&host->lock, flags); 2610 sdhci_enable_sdio_irq_nolock(host, true); 2611 spin_unlock_irqrestore(&host->lock, flags); 2612 } 2613 2614 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 2615 struct mmc_ios *ios) 2616 { 2617 struct sdhci_host *host = mmc_priv(mmc); 2618 u16 ctrl; 2619 int ret; 2620 2621 /* 2622 * Signal Voltage Switching is only applicable for Host Controllers 2623 * v3.00 and above. 
2624 */ 2625 if (host->version < SDHCI_SPEC_300) 2626 return 0; 2627 2628 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2629 2630 switch (ios->signal_voltage) { 2631 case MMC_SIGNAL_VOLTAGE_330: 2632 if (!(host->flags & SDHCI_SIGNALING_330)) 2633 return -EINVAL; 2634 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 2635 ctrl &= ~SDHCI_CTRL_VDD_180; 2636 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2637 2638 if (!IS_ERR(mmc->supply.vqmmc)) { 2639 ret = mmc_regulator_set_vqmmc(mmc, ios); 2640 if (ret < 0) { 2641 pr_warn("%s: Switching to 3.3V signalling voltage failed\n", 2642 mmc_hostname(mmc)); 2643 return -EIO; 2644 } 2645 } 2646 /* Wait for 5ms */ 2647 usleep_range(5000, 5500); 2648 2649 /* 3.3V regulator output should be stable within 5 ms */ 2650 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2651 if (!(ctrl & SDHCI_CTRL_VDD_180)) 2652 return 0; 2653 2654 pr_warn("%s: 3.3V regulator output did not become stable\n", 2655 mmc_hostname(mmc)); 2656 2657 return -EAGAIN; 2658 case MMC_SIGNAL_VOLTAGE_180: 2659 if (!(host->flags & SDHCI_SIGNALING_180)) 2660 return -EINVAL; 2661 if (!IS_ERR(mmc->supply.vqmmc)) { 2662 ret = mmc_regulator_set_vqmmc(mmc, ios); 2663 if (ret < 0) { 2664 pr_warn("%s: Switching to 1.8V signalling voltage failed\n", 2665 mmc_hostname(mmc)); 2666 return -EIO; 2667 } 2668 } 2669 2670 /* 2671 * Enable 1.8V Signal Enable in the Host Control2 2672 * register 2673 */ 2674 ctrl |= SDHCI_CTRL_VDD_180; 2675 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2676 2677 /* Some controllers need to do more when switching */ 2678 if (host->ops->voltage_switch) 2679 host->ops->voltage_switch(host); 2680 2681 /* 1.8V regulator output should be stable within 5 ms */ 2682 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2683 if (ctrl & SDHCI_CTRL_VDD_180) 2684 return 0; 2685 2686 pr_warn("%s: 1.8V regulator output did not become stable\n", 2687 mmc_hostname(mmc)); 2688 2689 return -EAGAIN; 2690 case MMC_SIGNAL_VOLTAGE_120: 2691 if (!(host->flags & SDHCI_SIGNALING_120)) 2692 return -EINVAL; 2693 if (!IS_ERR(mmc->supply.vqmmc)) { 2694 ret = mmc_regulator_set_vqmmc(mmc, ios); 2695 if (ret < 0) { 2696 pr_warn("%s: Switching to 1.2V signalling voltage failed\n", 2697 mmc_hostname(mmc)); 2698 return -EIO; 2699 } 2700 } 2701 return 0; 2702 default: 2703 /* No signal voltage switch required */ 2704 return 0; 2705 } 2706 } 2707 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch); 2708 2709 static int sdhci_card_busy(struct mmc_host *mmc) 2710 { 2711 struct sdhci_host *host = mmc_priv(mmc); 2712 u32 present_state; 2713 2714 /* Check whether DAT[0] is 0 */ 2715 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 2716 2717 return !(present_state & SDHCI_DATA_0_LVL_MASK); 2718 } 2719 2720 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 2721 { 2722 struct sdhci_host *host = mmc_priv(mmc); 2723 unsigned long flags; 2724 2725 spin_lock_irqsave(&host->lock, flags); 2726 host->flags |= SDHCI_HS400_TUNING; 2727 spin_unlock_irqrestore(&host->lock, flags); 2728 2729 return 0; 2730 } 2731 2732 void sdhci_start_tuning(struct sdhci_host *host) 2733 { 2734 u16 ctrl; 2735 2736 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2737 ctrl |= SDHCI_CTRL_EXEC_TUNING; 2738 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND) 2739 ctrl |= SDHCI_CTRL_TUNED_CLK; 2740 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2741 2742 /* 2743 * As per the Host Controller spec v3.00, tuning command 2744 * generates Buffer Read Ready interrupt, so enable that.
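* (Buffer Read Ready is SDHCI_INT_DATA_AVAIL, bit 5 of the Interrupt Status register, which is why only that bit is written below.)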
2745 * 2746 * Note: The spec clearly says that when tuning sequence 2747 * is being performed, the controller does not generate 2748 * interrupts other than Buffer Read Ready interrupt. But 2749 * to make sure we don't hit a controller bug, we _only_ 2750 * enable Buffer Read Ready interrupt here. 2751 */ 2752 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 2753 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 2754 } 2755 EXPORT_SYMBOL_GPL(sdhci_start_tuning); 2756 2757 void sdhci_end_tuning(struct sdhci_host *host) 2758 { 2759 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2760 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2761 } 2762 EXPORT_SYMBOL_GPL(sdhci_end_tuning); 2763 2764 void sdhci_reset_tuning(struct sdhci_host *host) 2765 { 2766 u16 ctrl; 2767 2768 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2769 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2770 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2771 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2772 } 2773 EXPORT_SYMBOL_GPL(sdhci_reset_tuning); 2774 2775 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) 2776 { 2777 sdhci_reset_tuning(host); 2778 2779 sdhci_reset_for(host, TUNING_ABORT); 2780 2781 sdhci_end_tuning(host); 2782 2783 mmc_send_abort_tuning(host->mmc, opcode); 2784 } 2785 EXPORT_SYMBOL_GPL(sdhci_abort_tuning); 2786 2787 /* 2788 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI 2789 * tuning command does not have a data payload (or rather the hardware does it 2790 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command 2791 * interrupt setup is different to other commands and there is no timeout 2792 * interrupt so special handling is needed. 2793 */ 2794 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) 2795 { 2796 struct mmc_host *mmc = host->mmc; 2797 struct mmc_command cmd = {}; 2798 struct mmc_request mrq = {}; 2799 unsigned long flags; 2800 u32 b = host->sdma_boundary; 2801 2802 spin_lock_irqsave(&host->lock, flags); 2803 2804 cmd.opcode = opcode; 2805 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2806 cmd.mrq = &mrq; 2807 2808 mrq.cmd = &cmd; 2809 /* 2810 * In response to CMD19, the card sends 64 bytes of tuning 2811 * block to the Host Controller. So we set the block size 2812 * to 64 here. 2813 */ 2814 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && 2815 mmc->ios.bus_width == MMC_BUS_WIDTH_8) 2816 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); 2817 else 2818 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); 2819 2820 /* 2821 * The tuning block is sent by the card to the host controller. 2822 * So we set the TRNS_READ bit in the Transfer Mode register. 2823 * This also takes care of setting DMA Enable and Multi Block 2824 * Select in the same register to 0. 
2825 */ 2826 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 2827 2828 if (!sdhci_send_command_retry(host, &cmd, flags)) { 2829 spin_unlock_irqrestore(&host->lock, flags); 2830 host->tuning_done = 0; 2831 return; 2832 } 2833 2834 host->cmd = NULL; 2835 2836 sdhci_del_timer(host, &mrq); 2837 2838 host->tuning_done = 0; 2839 2840 spin_unlock_irqrestore(&host->lock, flags); 2841 2842 /* Wait for Buffer Read Ready interrupt */ 2843 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2844 msecs_to_jiffies(50)); 2845 2846 } 2847 EXPORT_SYMBOL_GPL(sdhci_send_tuning); 2848 2849 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2850 { 2851 int i; 2852 2853 /* 2854 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number 2855 * of loops reaches tuning loop count. 2856 */ 2857 for (i = 0; i < host->tuning_loop_count; i++) { 2858 u16 ctrl; 2859 2860 sdhci_send_tuning(host, opcode); 2861 2862 if (!host->tuning_done) { 2863 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", 2864 mmc_hostname(host->mmc)); 2865 sdhci_abort_tuning(host, opcode); 2866 return -ETIMEDOUT; 2867 } 2868 2869 /* Spec does not require a delay between tuning cycles */ 2870 if (host->tuning_delay > 0) 2871 mdelay(host->tuning_delay); 2872 2873 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2874 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { 2875 if (ctrl & SDHCI_CTRL_TUNED_CLK) 2876 return 0; /* Success! */ 2877 break; 2878 } 2879 2880 } 2881 2882 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2883 mmc_hostname(host->mmc)); 2884 sdhci_reset_tuning(host); 2885 return -EAGAIN; 2886 } 2887 2888 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2889 { 2890 struct sdhci_host *host = mmc_priv(mmc); 2891 int err = 0; 2892 unsigned int tuning_count = 0; 2893 bool hs400_tuning; 2894 2895 hs400_tuning = host->flags & SDHCI_HS400_TUNING; 2896 2897 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 2898 tuning_count = host->tuning_count; 2899 2900 /* 2901 * The Host Controller needs tuning in case of SDR104 and DDR50 2902 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in 2903 * the Capabilities register. 2904 * If the Host Controller supports the HS200 mode then the 2905 * tuning function has to be executed. 2906 */ 2907 switch (host->timing) { 2908 /* HS400 tuning is done in HS200 mode */ 2909 case MMC_TIMING_MMC_HS400: 2910 err = -EINVAL; 2911 goto out; 2912 2913 case MMC_TIMING_MMC_HS200: 2914 /* 2915 * Periodic re-tuning for HS400 is not expected to be needed, so 2916 * disable it here. 
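* (A tuning_count of 0 becomes mmc->retune_period = 0 further down, which disables periodic re-tuning in the core.)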
2917 */ 2918 if (hs400_tuning) 2919 tuning_count = 0; 2920 break; 2921 2922 case MMC_TIMING_UHS_SDR104: 2923 case MMC_TIMING_UHS_DDR50: 2924 break; 2925 2926 case MMC_TIMING_UHS_SDR50: 2927 if (host->flags & SDHCI_SDR50_NEEDS_TUNING) 2928 break; 2929 fallthrough; 2930 2931 default: 2932 goto out; 2933 } 2934 2935 if (host->ops->platform_execute_tuning) { 2936 err = host->ops->platform_execute_tuning(host, opcode); 2937 goto out; 2938 } 2939 2940 mmc->retune_period = tuning_count; 2941 2942 if (host->tuning_delay < 0) 2943 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; 2944 2945 sdhci_start_tuning(host); 2946 2947 host->tuning_err = __sdhci_execute_tuning(host, opcode); 2948 2949 sdhci_end_tuning(host); 2950 out: 2951 host->flags &= ~SDHCI_HS400_TUNING; 2952 2953 return err; 2954 } 2955 EXPORT_SYMBOL_GPL(sdhci_execute_tuning); 2956 2957 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2958 { 2959 /* Host Controller v3.00 defines preset value registers */ 2960 if (host->version < SDHCI_SPEC_300) 2961 return; 2962 2963 /* 2964 * We only enable or disable Preset Value if they are not already 2965 * enabled or disabled respectively. Otherwise, we bail out. 2966 */ 2967 if (host->preset_enabled != enable) { 2968 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2969 2970 if (enable) 2971 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2972 else 2973 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2974 2975 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2976 2977 if (enable) 2978 host->flags |= SDHCI_PV_ENABLED; 2979 else 2980 host->flags &= ~SDHCI_PV_ENABLED; 2981 2982 host->preset_enabled = enable; 2983 } 2984 } 2985 2986 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 2987 int err) 2988 { 2989 struct mmc_data *data = mrq->data; 2990 2991 if (data->host_cookie != COOKIE_UNMAPPED) 2992 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, 2993 mmc_get_dma_dir(data)); 2994 2995 data->host_cookie = COOKIE_UNMAPPED; 2996 } 2997 2998 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 2999 { 3000 struct sdhci_host *host = mmc_priv(mmc); 3001 3002 mrq->data->host_cookie = COOKIE_UNMAPPED; 3003 3004 /* 3005 * No pre-mapping in the pre hook if we're using the bounce buffer, 3006 * for that we would need two bounce buffers since one buffer is 3007 * in flight when this is getting called. 
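* (With a bounce buffer the data is instead copied into the single pre-mapped buffer at request time, so there is nothing useful to do here.)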
3008 */ 3009 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) 3010 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 3011 } 3012 3013 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) 3014 { 3015 if (host->data_cmd) { 3016 host->data_cmd->error = err; 3017 sdhci_finish_mrq(host, host->data_cmd->mrq); 3018 } 3019 3020 if (host->cmd) { 3021 host->cmd->error = err; 3022 sdhci_finish_mrq(host, host->cmd->mrq); 3023 } 3024 } 3025 3026 static void sdhci_card_event(struct mmc_host *mmc) 3027 { 3028 struct sdhci_host *host = mmc_priv(mmc); 3029 unsigned long flags; 3030 int present; 3031 3032 /* First check if client has provided their own card event */ 3033 if (host->ops->card_event) 3034 host->ops->card_event(host); 3035 3036 present = mmc->ops->get_cd(mmc); 3037 3038 spin_lock_irqsave(&host->lock, flags); 3039 3040 /* Check sdhci_has_requests() first in case we are runtime suspended */ 3041 if (sdhci_has_requests(host) && !present) { 3042 pr_err("%s: Card removed during transfer!\n", 3043 mmc_hostname(mmc)); 3044 pr_err("%s: Resetting controller.\n", 3045 mmc_hostname(mmc)); 3046 3047 sdhci_reset_for(host, CARD_REMOVED); 3048 3049 sdhci_error_out_mrqs(host, -ENOMEDIUM); 3050 } 3051 3052 spin_unlock_irqrestore(&host->lock, flags); 3053 } 3054 3055 static const struct mmc_host_ops sdhci_ops = { 3056 .request = sdhci_request, 3057 .post_req = sdhci_post_req, 3058 .pre_req = sdhci_pre_req, 3059 .set_ios = sdhci_set_ios, 3060 .get_cd = sdhci_get_cd, 3061 .get_ro = sdhci_get_ro, 3062 .card_hw_reset = sdhci_hw_reset, 3063 .enable_sdio_irq = sdhci_enable_sdio_irq, 3064 .ack_sdio_irq = sdhci_ack_sdio_irq, 3065 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 3066 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 3067 .execute_tuning = sdhci_execute_tuning, 3068 .card_event = sdhci_card_event, 3069 .card_busy = sdhci_card_busy, 3070 }; 3071 3072 /*****************************************************************************\ 3073 * * 3074 * Request done * 3075 * * 3076 \*****************************************************************************/ 3077 3078 static bool sdhci_request_done(struct sdhci_host *host) 3079 { 3080 unsigned long flags; 3081 struct mmc_request *mrq; 3082 int i; 3083 3084 spin_lock_irqsave(&host->lock, flags); 3085 3086 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3087 mrq = host->mrqs_done[i]; 3088 if (mrq) 3089 break; 3090 } 3091 3092 if (!mrq) { 3093 spin_unlock_irqrestore(&host->lock, flags); 3094 return true; 3095 } 3096 3097 /* 3098 * The controller needs a reset of internal state machines 3099 * upon error conditions. 3100 */ 3101 if (sdhci_needs_reset(host, mrq)) { 3102 /* 3103 * Do not finish until command and data lines are available for 3104 * reset. Note there can only be one other mrq, so it cannot 3105 * also be in mrqs_done, otherwise host->cmd and host->data_cmd 3106 * would both be null. 3107 */ 3108 if (host->cmd || host->data_cmd) { 3109 spin_unlock_irqrestore(&host->lock, flags); 3110 return true; 3111 } 3112 3113 /* Some controllers need this kick or reset won't work here */ 3114 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 3115 /* This is to force an update */ 3116 host->ops->set_clock(host, host->clock); 3117 3118 sdhci_reset_for(host, REQUEST_ERROR); 3119 3120 host->pending_reset = false; 3121 } 3122 3123 /* 3124 * Always unmap the data buffers if they were mapped by 3125 * sdhci_prepare_data() whenever we finish with a request. 3126 * This avoids leaking DMA mappings on error. 
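* The cookie tracks ownership: COOKIE_MAPPED buffers were mapped by the driver at request time and are unmapped here, while COOKIE_PRE_MAPPED buffers were mapped in sdhci_pre_req() and are left for sdhci_post_req() to unmap.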
3127 */ 3128 if (host->flags & SDHCI_REQ_USE_DMA) { 3129 struct mmc_data *data = mrq->data; 3130 3131 if (host->use_external_dma && data && 3132 (mrq->cmd->error || data->error)) { 3133 struct dma_chan *chan = sdhci_external_dma_channel(host, data); 3134 3135 host->mrqs_done[i] = NULL; 3136 spin_unlock_irqrestore(&host->lock, flags); 3137 dmaengine_terminate_sync(chan); 3138 spin_lock_irqsave(&host->lock, flags); 3139 sdhci_set_mrq_done(host, mrq); 3140 } 3141 3142 if (data && data->host_cookie == COOKIE_MAPPED) { 3143 if (host->bounce_buffer) { 3144 /* 3145 * On reads, copy the bounced data into the 3146 * sglist 3147 */ 3148 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 3149 unsigned int length = data->bytes_xfered; 3150 3151 if (length > host->bounce_buffer_size) { 3152 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 3153 mmc_hostname(host->mmc), 3154 host->bounce_buffer_size, 3155 data->bytes_xfered); 3156 /* Cap it down and continue */ 3157 length = host->bounce_buffer_size; 3158 } 3159 dma_sync_single_for_cpu( 3160 mmc_dev(host->mmc), 3161 host->bounce_addr, 3162 host->bounce_buffer_size, 3163 DMA_FROM_DEVICE); 3164 sg_copy_from_buffer(data->sg, 3165 data->sg_len, 3166 host->bounce_buffer, 3167 length); 3168 } else { 3169 /* No copying, just switch ownership */ 3170 dma_sync_single_for_cpu( 3171 mmc_dev(host->mmc), 3172 host->bounce_addr, 3173 host->bounce_buffer_size, 3174 mmc_get_dma_dir(data)); 3175 } 3176 } else { 3177 /* Unmap the raw data */ 3178 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 3179 data->sg_len, 3180 mmc_get_dma_dir(data)); 3181 } 3182 data->host_cookie = COOKIE_UNMAPPED; 3183 } 3184 } 3185 3186 host->mrqs_done[i] = NULL; 3187 3188 spin_unlock_irqrestore(&host->lock, flags); 3189 3190 if (host->ops->request_done) 3191 host->ops->request_done(host, mrq); 3192 else 3193 mmc_request_done(host->mmc, mrq); 3194 3195 return false; 3196 } 3197 3198 static void sdhci_complete_work(struct work_struct *work) 3199 { 3200 struct sdhci_host *host = container_of(work, struct sdhci_host, 3201 complete_work); 3202 3203 while (!sdhci_request_done(host)) 3204 ; 3205 } 3206 3207 static void sdhci_timeout_timer(struct timer_list *t) 3208 { 3209 struct sdhci_host *host; 3210 unsigned long flags; 3211 3212 host = from_timer(host, t, timer); 3213 3214 spin_lock_irqsave(&host->lock, flags); 3215 3216 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { 3217 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", 3218 mmc_hostname(host->mmc)); 3219 sdhci_err_stats_inc(host, REQ_TIMEOUT); 3220 sdhci_dumpregs(host); 3221 3222 host->cmd->error = -ETIMEDOUT; 3223 sdhci_finish_mrq(host, host->cmd->mrq); 3224 } 3225 3226 spin_unlock_irqrestore(&host->lock, flags); 3227 } 3228 3229 static void sdhci_timeout_data_timer(struct timer_list *t) 3230 { 3231 struct sdhci_host *host; 3232 unsigned long flags; 3233 3234 host = from_timer(host, t, data_timer); 3235 3236 spin_lock_irqsave(&host->lock, flags); 3237 3238 if (host->data || host->data_cmd || 3239 (host->cmd && sdhci_data_line_cmd(host->cmd))) { 3240 pr_err("%s: Timeout waiting for hardware interrupt.\n", 3241 mmc_hostname(host->mmc)); 3242 sdhci_err_stats_inc(host, REQ_TIMEOUT); 3243 sdhci_dumpregs(host); 3244 3245 if (host->data) { 3246 host->data->error = -ETIMEDOUT; 3247 __sdhci_finish_data(host, true); 3248 queue_work(host->complete_wq, &host->complete_work); 3249 } else if (host->data_cmd) { 3250 host->data_cmd->error = -ETIMEDOUT; 3251 sdhci_finish_mrq(host, host->data_cmd->mrq); 3252 } else { 
3253 host->cmd->error = -ETIMEDOUT; 3254 sdhci_finish_mrq(host, host->cmd->mrq); 3255 } 3256 } 3257 3258 spin_unlock_irqrestore(&host->lock, flags); 3259 } 3260 3261 /*****************************************************************************\ 3262 * * 3263 * Interrupt handling * 3264 * * 3265 \*****************************************************************************/ 3266 3267 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) 3268 { 3269 /* Handle auto-CMD12 error */ 3270 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { 3271 struct mmc_request *mrq = host->data_cmd->mrq; 3272 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3273 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 3274 SDHCI_INT_DATA_TIMEOUT : 3275 SDHCI_INT_DATA_CRC; 3276 3277 /* Treat auto-CMD12 error the same as data error */ 3278 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { 3279 *intmask_p |= data_err_bit; 3280 return; 3281 } 3282 } 3283 3284 if (!host->cmd) { 3285 /* 3286 * SDHCI recovers from errors by resetting the cmd and data 3287 * circuits. Until that is done, there very well might be more 3288 * interrupts, so ignore them in that case. 3289 */ 3290 if (host->pending_reset) 3291 return; 3292 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 3293 mmc_hostname(host->mmc), (unsigned)intmask); 3294 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3295 sdhci_dumpregs(host); 3296 return; 3297 } 3298 3299 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 3300 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 3301 if (intmask & SDHCI_INT_TIMEOUT) { 3302 host->cmd->error = -ETIMEDOUT; 3303 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3304 } else { 3305 host->cmd->error = -EILSEQ; 3306 if (!mmc_op_tuning(host->cmd->opcode)) 3307 sdhci_err_stats_inc(host, CMD_CRC); 3308 } 3309 /* Treat data command CRC error the same as data CRC error */ 3310 if (host->cmd->data && 3311 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 3312 SDHCI_INT_CRC) { 3313 host->cmd = NULL; 3314 *intmask_p |= SDHCI_INT_DATA_CRC; 3315 return; 3316 } 3317 3318 __sdhci_finish_mrq(host, host->cmd->mrq); 3319 return; 3320 } 3321 3322 /* Handle auto-CMD23 error */ 3323 if (intmask & SDHCI_INT_AUTO_CMD_ERR) { 3324 struct mmc_request *mrq = host->cmd->mrq; 3325 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3326 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 
-ETIMEDOUT : 3328 -EILSEQ; 3329 3330 sdhci_err_stats_inc(host, AUTO_CMD); 3331 3332 if (sdhci_auto_cmd23(host, mrq)) { 3333 mrq->sbc->error = err; 3334 __sdhci_finish_mrq(host, mrq); 3335 return; 3336 } 3337 } 3338 3339 if (intmask & SDHCI_INT_RESPONSE) 3340 sdhci_finish_command(host); 3341 } 3342 3343 static void sdhci_adma_show_error(struct sdhci_host *host) 3344 { 3345 void *desc = host->adma_table; 3346 dma_addr_t dma = host->adma_addr; 3347 3348 sdhci_dumpregs(host); 3349 3350 while (true) { 3351 struct sdhci_adma2_64_desc *dma_desc = desc; 3352 3353 if (host->flags & SDHCI_USE_64_BIT_DMA) 3354 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 3355 (unsigned long long)dma, 3356 le32_to_cpu(dma_desc->addr_hi), 3357 le32_to_cpu(dma_desc->addr_lo), 3358 le16_to_cpu(dma_desc->len), 3359 le16_to_cpu(dma_desc->cmd)); 3360 else 3361 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 3362 (unsigned long long)dma, 3363 le32_to_cpu(dma_desc->addr_lo), 3364 le16_to_cpu(dma_desc->len), 3365 le16_to_cpu(dma_desc->cmd)); 3366 3367 desc += host->desc_sz; 3368 dma += host->desc_sz; 3369 3370 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 3371 break; 3372 } 3373 } 3374 3375 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 3376 { 3377 u32 command; 3378 3379 /* 3380 * CMD19 generates _only_ a Buffer Read Ready interrupt when 3381 * sdhci_send_tuning() is used. We must also handle the case where 3382 * mmc_send_tuning() is used in PIO mode: if we did not, 3383 * sdhci_transfer_pio() would never be called, SDHCI_INT_DATA_AVAIL 3384 * would stay asserted, and we would be stuck in an interrupt storm. 3385 */ 3386 if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) { 3387 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); 3388 if (command == MMC_SEND_TUNING_BLOCK || 3389 command == MMC_SEND_TUNING_BLOCK_HS200) { 3390 host->tuning_done = 1; 3391 wake_up(&host->buf_ready_int); 3392 return; 3393 } 3394 } 3395 3396 if (!host->data) { 3397 struct mmc_command *data_cmd = host->data_cmd; 3398 3399 /* 3400 * The "data complete" interrupt is also used to 3401 * indicate that a busy state has ended. See comment 3402 * above in sdhci_cmd_irq(). 3403 */ 3404 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 3405 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3406 host->data_cmd = NULL; 3407 data_cmd->error = -ETIMEDOUT; 3408 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3409 __sdhci_finish_mrq(host, data_cmd->mrq); 3410 return; 3411 } 3412 if (intmask & SDHCI_INT_DATA_END) { 3413 host->data_cmd = NULL; 3414 /* 3415 * Some cards handle busy-end interrupt 3416 * before the command completed, so make 3417 * sure we do things in the proper order. 3418 */ 3419 if (host->cmd == data_cmd) 3420 return; 3421 3422 __sdhci_finish_mrq(host, data_cmd->mrq); 3423 return; 3424 } 3425 } 3426 3427 /* 3428 * SDHCI recovers from errors by resetting the cmd and data 3429 * circuits. Until that is done, there very well might be more 3430 * interrupts, so ignore them in that case.
3431 */ 3432 if (host->pending_reset) 3433 return; 3434 3435 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 3436 mmc_hostname(host->mmc), (unsigned)intmask); 3437 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3438 sdhci_dumpregs(host); 3439 3440 return; 3441 } 3442 3443 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3444 host->data->error = -ETIMEDOUT; 3445 sdhci_err_stats_inc(host, DAT_TIMEOUT); 3446 } else if (intmask & SDHCI_INT_DATA_END_BIT) { 3447 host->data->error = -EILSEQ; 3448 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3449 sdhci_err_stats_inc(host, DAT_CRC); 3450 } else if ((intmask & SDHCI_INT_DATA_CRC) && 3451 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 3452 != MMC_BUS_TEST_R) { 3453 host->data->error = -EILSEQ; 3454 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3455 sdhci_err_stats_inc(host, DAT_CRC); 3456 } else if (intmask & SDHCI_INT_ADMA_ERROR) { 3457 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), 3458 intmask); 3459 sdhci_adma_show_error(host); 3460 sdhci_err_stats_inc(host, ADMA); 3461 host->data->error = -EIO; 3462 if (host->ops->adma_workaround) 3463 host->ops->adma_workaround(host, intmask); 3464 } 3465 3466 if (host->data->error) 3467 sdhci_finish_data(host); 3468 else { 3469 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 3470 sdhci_transfer_pio(host); 3471 3472 /* 3473 * We currently don't do anything fancy with DMA 3474 * boundaries, but as we can't disable the feature 3475 * we need to at least restart the transfer. 3476 * 3477 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 3478 * should return a valid address to continue from, but as 3479 * some controllers are faulty, don't trust them. 3480 */ 3481 if (intmask & SDHCI_INT_DMA_END) { 3482 dma_addr_t dmastart, dmanow; 3483 3484 dmastart = sdhci_sdma_address(host); 3485 dmanow = dmastart + host->data->bytes_xfered; 3486 /* 3487 * Force update to the next DMA block boundary. 3488 */ 3489 dmanow = (dmanow & 3490 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 3491 SDHCI_DEFAULT_BOUNDARY_SIZE; 3492 host->data->bytes_xfered = dmanow - dmastart; 3493 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", 3494 &dmastart, host->data->bytes_xfered, &dmanow); 3495 sdhci_set_sdma_addr(host, dmanow); 3496 } 3497 3498 if (intmask & SDHCI_INT_DATA_END) { 3499 if (host->cmd == host->data_cmd) { 3500 /* 3501 * Data managed to finish before the 3502 * command completed. Make sure we do 3503 * things in the proper order. 
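* (host->data_early is picked up in sdhci_finish_command(), which calls sdhci_finish_data() once the command response has been handled.)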
3504 */ 3505 host->data_early = 1; 3506 } else { 3507 sdhci_finish_data(host); 3508 } 3509 } 3510 } 3511 } 3512 3513 static inline bool sdhci_defer_done(struct sdhci_host *host, 3514 struct mmc_request *mrq) 3515 { 3516 struct mmc_data *data = mrq->data; 3517 3518 return host->pending_reset || host->always_defer_done || 3519 ((host->flags & SDHCI_REQ_USE_DMA) && data && 3520 data->host_cookie == COOKIE_MAPPED); 3521 } 3522 3523 static irqreturn_t sdhci_irq(int irq, void *dev_id) 3524 { 3525 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0}; 3526 irqreturn_t result = IRQ_NONE; 3527 struct sdhci_host *host = dev_id; 3528 u32 intmask, mask, unexpected = 0; 3529 int max_loops = 16; 3530 int i; 3531 3532 spin_lock(&host->lock); 3533 3534 if (host->runtime_suspended) { 3535 spin_unlock(&host->lock); 3536 return IRQ_NONE; 3537 } 3538 3539 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3540 if (!intmask || intmask == 0xffffffff) { 3541 result = IRQ_NONE; 3542 goto out; 3543 } 3544 3545 do { 3546 DBG("IRQ status 0x%08x\n", intmask); 3547 3548 if (host->ops->irq) { 3549 intmask = host->ops->irq(host, intmask); 3550 if (!intmask) 3551 goto cont; 3552 } 3553 3554 /* Clear selected interrupts. */ 3555 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3556 SDHCI_INT_BUS_POWER); 3557 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3558 3559 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3560 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 3561 SDHCI_CARD_PRESENT; 3562 3563 /* 3564 * There is an observation on i.MX eSDHC: the INSERT 3565 * bit will be immediately set again when it gets 3566 * cleared, if a card is inserted. We have to mask 3567 * the irq to prevent an interrupt storm which would 3568 * freeze the system. The REMOVE bit suffers from 3569 * the same problem. 3570 * 3571 * More testing is needed here to ensure it works 3572 * for other platforms though. 3573 */ 3574 host->ier &= ~(SDHCI_INT_CARD_INSERT | 3575 SDHCI_INT_CARD_REMOVE); 3576 host->ier |= present ?
SDHCI_INT_CARD_REMOVE : 3577 SDHCI_INT_CARD_INSERT; 3578 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3579 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3580 3581 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 3582 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 3583 3584 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 3585 SDHCI_INT_CARD_REMOVE); 3586 result = IRQ_WAKE_THREAD; 3587 } 3588 3589 if (intmask & SDHCI_INT_CMD_MASK) 3590 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask); 3591 3592 if (intmask & SDHCI_INT_DATA_MASK) 3593 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 3594 3595 if (intmask & SDHCI_INT_BUS_POWER) 3596 pr_err("%s: Card is consuming too much power!\n", 3597 mmc_hostname(host->mmc)); 3598 3599 if (intmask & SDHCI_INT_RETUNE) 3600 mmc_retune_needed(host->mmc); 3601 3602 if ((intmask & SDHCI_INT_CARD_INT) && 3603 (host->ier & SDHCI_INT_CARD_INT)) { 3604 sdhci_enable_sdio_irq_nolock(host, false); 3605 sdio_signal_irq(host->mmc); 3606 } 3607 3608 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 3609 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3610 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 3611 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); 3612 3613 if (intmask) { 3614 unexpected |= intmask; 3615 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3616 } 3617 cont: 3618 if (result == IRQ_NONE) 3619 result = IRQ_HANDLED; 3620 3621 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3622 } while (intmask && --max_loops); 3623 3624 /* Determine if mrqs can be completed immediately */ 3625 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3626 struct mmc_request *mrq = host->mrqs_done[i]; 3627 3628 if (!mrq) 3629 continue; 3630 3631 if (sdhci_defer_done(host, mrq)) { 3632 result = IRQ_WAKE_THREAD; 3633 } else { 3634 mrqs_done[i] = mrq; 3635 host->mrqs_done[i] = NULL; 3636 } 3637 } 3638 out: 3639 if (host->deferred_cmd) 3640 result = IRQ_WAKE_THREAD; 3641 3642 spin_unlock(&host->lock); 3643 3644 /* Process mrqs ready for immediate completion */ 3645 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3646 if (!mrqs_done[i]) 3647 continue; 3648 3649 if (host->ops->request_done) 3650 host->ops->request_done(host, mrqs_done[i]); 3651 else 3652 mmc_request_done(host->mmc, mrqs_done[i]); 3653 } 3654 3655 if (unexpected) { 3656 pr_err("%s: Unexpected interrupt 0x%08x.\n", 3657 mmc_hostname(host->mmc), unexpected); 3658 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3659 sdhci_dumpregs(host); 3660 } 3661 3662 return result; 3663 } 3664 3665 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3666 { 3667 struct sdhci_host *host = dev_id; 3668 struct mmc_command *cmd; 3669 unsigned long flags; 3670 u32 isr; 3671 3672 while (!sdhci_request_done(host)) 3673 ; 3674 3675 spin_lock_irqsave(&host->lock, flags); 3676 3677 isr = host->thread_isr; 3678 host->thread_isr = 0; 3679 3680 cmd = host->deferred_cmd; 3681 if (cmd && !sdhci_send_command_retry(host, cmd, flags)) 3682 sdhci_finish_mrq(host, cmd->mrq); 3683 3684 spin_unlock_irqrestore(&host->lock, flags); 3685 3686 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3687 struct mmc_host *mmc = host->mmc; 3688 3689 mmc->ops->card_event(mmc); 3690 mmc_detect_change(mmc, msecs_to_jiffies(200)); 3691 } 3692 3693 return IRQ_HANDLED; 3694 } 3695 3696 /*****************************************************************************\ 3697 * * 3698 * Suspend/resume * 3699 * * 3700 \*****************************************************************************/ 3701 3702 #ifdef CONFIG_PM 3703 3704 static bool 
sdhci_cd_irq_can_wakeup(struct sdhci_host *host) 3705 { 3706 return mmc_card_is_removable(host->mmc) && 3707 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3708 !mmc_can_gpio_cd(host->mmc); 3709 } 3710 3711 /* 3712 * To enable wakeup events, the corresponding events have to be enabled in 3713 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal 3714 * Table' in the SD Host Controller Standard Specification. 3715 * It is useless to restore SDHCI_INT_ENABLE state in 3716 * sdhci_disable_irq_wakeups() since it will be set by 3717 * sdhci_enable_card_detection() or sdhci_init(). 3718 */ 3719 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) 3720 { 3721 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | 3722 SDHCI_WAKE_ON_INT; 3723 u32 irq_val = 0; 3724 u8 wake_val = 0; 3725 u8 val; 3726 3727 if (sdhci_cd_irq_can_wakeup(host)) { 3728 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; 3729 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; 3730 } 3731 3732 if (mmc_card_wake_sdio_irq(host->mmc)) { 3733 wake_val |= SDHCI_WAKE_ON_INT; 3734 irq_val |= SDHCI_INT_CARD_INT; 3735 } 3736 3737 if (!irq_val) 3738 return false; 3739 3740 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3741 val &= ~mask; 3742 val |= wake_val; 3743 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3744 3745 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); 3746 3747 host->irq_wake_enabled = !enable_irq_wake(host->irq); 3748 3749 return host->irq_wake_enabled; 3750 } 3751 3752 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 3753 { 3754 u8 val; 3755 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 3756 | SDHCI_WAKE_ON_INT; 3757 3758 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3759 val &= ~mask; 3760 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3761 3762 disable_irq_wake(host->irq); 3763 3764 host->irq_wake_enabled = false; 3765 } 3766 3767 int sdhci_suspend_host(struct sdhci_host *host) 3768 { 3769 sdhci_disable_card_detection(host); 3770 3771 mmc_retune_timer_stop(host->mmc); 3772 3773 if (!device_may_wakeup(mmc_dev(host->mmc)) || 3774 !sdhci_enable_irq_wakeups(host)) { 3775 host->ier = 0; 3776 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3777 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3778 free_irq(host->irq, host); 3779 } 3780 3781 return 0; 3782 } 3783 3784 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 3785 3786 int sdhci_resume_host(struct sdhci_host *host) 3787 { 3788 struct mmc_host *mmc = host->mmc; 3789 int ret = 0; 3790 3791 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3792 if (host->ops->enable_dma) 3793 host->ops->enable_dma(host); 3794 } 3795 3796 if ((mmc->pm_flags & MMC_PM_KEEP_POWER) && 3797 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 3798 /* Card keeps power but host controller does not */ 3799 sdhci_init(host, 0); 3800 host->pwr = 0; 3801 host->clock = 0; 3802 host->reinit_uhs = true; 3803 mmc->ops->set_ios(mmc, &mmc->ios); 3804 } else { 3805 sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER)); 3806 } 3807 3808 if (host->irq_wake_enabled) { 3809 sdhci_disable_irq_wakeups(host); 3810 } else { 3811 ret = request_threaded_irq(host->irq, sdhci_irq, 3812 sdhci_thread_irq, IRQF_SHARED, 3813 mmc_hostname(mmc), host); 3814 if (ret) 3815 return ret; 3816 } 3817 3818 sdhci_enable_card_detection(host); 3819 3820 return ret; 3821 } 3822 3823 EXPORT_SYMBOL_GPL(sdhci_resume_host); 3824 3825 int sdhci_runtime_suspend_host(struct sdhci_host *host) 3826 { 3827 unsigned long flags; 3828 3829 mmc_retune_timer_stop(host->mmc); 3830 3831 
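/* Mask all interrupts except the SDIO card interrupt, so a card interrupt can still be signalled while runtime suspended. */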
spin_lock_irqsave(&host->lock, flags); 3832 host->ier &= SDHCI_INT_CARD_INT; 3833 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3834 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3835 spin_unlock_irqrestore(&host->lock, flags); 3836 3837 synchronize_hardirq(host->irq); 3838 3839 spin_lock_irqsave(&host->lock, flags); 3840 host->runtime_suspended = true; 3841 spin_unlock_irqrestore(&host->lock, flags); 3842 3843 return 0; 3844 } 3845 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 3846 3847 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) 3848 { 3849 struct mmc_host *mmc = host->mmc; 3850 unsigned long flags; 3851 int host_flags = host->flags; 3852 3853 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3854 if (host->ops->enable_dma) 3855 host->ops->enable_dma(host); 3856 } 3857 3858 sdhci_init(host, soft_reset); 3859 3860 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && 3861 mmc->ios.power_mode != MMC_POWER_OFF) { 3862 /* Force clock and power re-program */ 3863 host->pwr = 0; 3864 host->clock = 0; 3865 host->reinit_uhs = true; 3866 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); 3867 mmc->ops->set_ios(mmc, &mmc->ios); 3868 3869 if ((host_flags & SDHCI_PV_ENABLED) && 3870 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { 3871 spin_lock_irqsave(&host->lock, flags); 3872 sdhci_enable_preset_value(host, true); 3873 spin_unlock_irqrestore(&host->lock, flags); 3874 } 3875 3876 if ((mmc->caps2 & MMC_CAP2_HS400_ES) && 3877 mmc->ops->hs400_enhanced_strobe) 3878 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); 3879 } 3880 3881 spin_lock_irqsave(&host->lock, flags); 3882 3883 host->runtime_suspended = false; 3884 3885 /* Enable SDIO IRQ */ 3886 if (sdio_irq_claimed(mmc)) 3887 sdhci_enable_sdio_irq_nolock(host, true); 3888 3889 /* Enable Card Detection */ 3890 sdhci_enable_card_detection(host); 3891 3892 spin_unlock_irqrestore(&host->lock, flags); 3893 3894 return 0; 3895 } 3896 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 3897 3898 #endif /* CONFIG_PM */ 3899 3900 /*****************************************************************************\ 3901 * * 3902 * Command Queue Engine (CQE) helpers * 3903 * * 3904 \*****************************************************************************/ 3905 3906 void sdhci_cqe_enable(struct mmc_host *mmc) 3907 { 3908 struct sdhci_host *host = mmc_priv(mmc); 3909 unsigned long flags; 3910 u8 ctrl; 3911 3912 spin_lock_irqsave(&host->lock, flags); 3913 3914 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 3915 ctrl &= ~SDHCI_CTRL_DMA_MASK; 3916 /* 3917 * Host from V4.10 supports ADMA3 DMA type. 3918 * ADMA3 performs integrated descriptor which is more suitable 3919 * for cmd queuing to fetch both command and transfer descriptors. 
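* (With ADMA3 the controller itself fetches a command descriptor plus an ADMA2 transfer descriptor per task, rather than software programming each command individually.)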
3920 */ 3921 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) 3922 ctrl |= SDHCI_CTRL_ADMA3; 3923 else if (host->flags & SDHCI_USE_64_BIT_DMA) 3924 ctrl |= SDHCI_CTRL_ADMA64; 3925 else 3926 ctrl |= SDHCI_CTRL_ADMA32; 3927 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 3928 3929 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), 3930 SDHCI_BLOCK_SIZE); 3931 3932 /* Set maximum timeout */ 3933 sdhci_set_timeout(host, NULL); 3934 3935 host->ier = host->cqe_ier; 3936 3937 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3938 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3939 3940 host->cqe_on = true; 3941 3942 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", 3943 mmc_hostname(mmc), host->ier, 3944 sdhci_readl(host, SDHCI_INT_STATUS)); 3945 3946 spin_unlock_irqrestore(&host->lock, flags); 3947 } 3948 EXPORT_SYMBOL_GPL(sdhci_cqe_enable); 3949 3950 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) 3951 { 3952 struct sdhci_host *host = mmc_priv(mmc); 3953 unsigned long flags; 3954 3955 spin_lock_irqsave(&host->lock, flags); 3956 3957 sdhci_set_default_irqs(host); 3958 3959 host->cqe_on = false; 3960 3961 if (recovery) 3962 sdhci_reset_for(host, CQE_RECOVERY); 3963 3964 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", 3965 mmc_hostname(mmc), host->ier, 3966 sdhci_readl(host, SDHCI_INT_STATUS)); 3967 3968 spin_unlock_irqrestore(&host->lock, flags); 3969 } 3970 EXPORT_SYMBOL_GPL(sdhci_cqe_disable); 3971 3972 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, 3973 int *data_error) 3974 { 3975 u32 mask; 3976 3977 if (!host->cqe_on) 3978 return false; 3979 3980 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) { 3981 *cmd_error = -EILSEQ; 3982 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3983 sdhci_err_stats_inc(host, CMD_CRC); 3984 } else if (intmask & SDHCI_INT_TIMEOUT) { 3985 *cmd_error = -ETIMEDOUT; 3986 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3987 } else 3988 *cmd_error = 0; 3989 3990 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) { 3991 *data_error = -EILSEQ; 3992 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3993 sdhci_err_stats_inc(host, DAT_CRC); 3994 } else if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3995 *data_error = -ETIMEDOUT; 3996 sdhci_err_stats_inc(host, DAT_TIMEOUT); 3997 } else if (intmask & SDHCI_INT_ADMA_ERROR) { 3998 *data_error = -EIO; 3999 sdhci_err_stats_inc(host, ADMA); 4000 } else 4001 *data_error = 0; 4002 4003 /* Clear selected interrupts. 
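* Only status bits belonging to the CQE interrupt mask are acked here; whatever is left over is reported as unexpected further down.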
*/ 4004 mask = intmask & host->cqe_ier; 4005 sdhci_writel(host, mask, SDHCI_INT_STATUS); 4006 4007 if (intmask & SDHCI_INT_BUS_POWER) 4008 pr_err("%s: Card is consuming too much power!\n", 4009 mmc_hostname(host->mmc)); 4010 4011 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR); 4012 if (intmask) { 4013 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 4014 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n", 4015 mmc_hostname(host->mmc), intmask); 4016 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 4017 sdhci_dumpregs(host); 4018 } 4019 4020 return true; 4021 } 4022 EXPORT_SYMBOL_GPL(sdhci_cqe_irq); 4023 4024 /*****************************************************************************\ 4025 * * 4026 * Device allocation/registration * 4027 * * 4028 \*****************************************************************************/ 4029 4030 struct sdhci_host *sdhci_alloc_host(struct device *dev, 4031 size_t priv_size) 4032 { 4033 struct mmc_host *mmc; 4034 struct sdhci_host *host; 4035 4036 WARN_ON(dev == NULL); 4037 4038 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 4039 if (!mmc) 4040 return ERR_PTR(-ENOMEM); 4041 4042 host = mmc_priv(mmc); 4043 host->mmc = mmc; 4044 host->mmc_host_ops = sdhci_ops; 4045 mmc->ops = &host->mmc_host_ops; 4046 4047 host->flags = SDHCI_SIGNALING_330; 4048 4049 host->cqe_ier = SDHCI_CQE_INT_MASK; 4050 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK; 4051 4052 host->tuning_delay = -1; 4053 host->tuning_loop_count = MAX_TUNING_LOOP; 4054 4055 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; 4056 4057 /* 4058 * The DMA table descriptor count is calculated as the maximum 4059 * number of segments times 2, to allow for an alignment 4060 * descriptor for each segment, plus 1 for a nop end descriptor. 4061 */ 4062 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; 4063 host->max_adma = 65536; 4064 4065 host->max_timeout_count = 0xE; 4066 4067 return host; 4068 } 4069 4070 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 4071 4072 static int sdhci_set_dma_mask(struct sdhci_host *host) 4073 { 4074 struct mmc_host *mmc = host->mmc; 4075 struct device *dev = mmc_dev(mmc); 4076 int ret = -EINVAL; 4077 4078 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 4079 host->flags &= ~SDHCI_USE_64_BIT_DMA; 4080 4081 /* Try 64-bit mask if hardware is capable of it */ 4082 if (host->flags & SDHCI_USE_64_BIT_DMA) { 4083 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 4084 if (ret) { 4085 pr_warn("%s: Failed to set 64-bit DMA mask.\n", 4086 mmc_hostname(mmc)); 4087 host->flags &= ~SDHCI_USE_64_BIT_DMA; 4088 } 4089 } 4090 4091 /* 32-bit mask as default & fallback */ 4092 if (ret) { 4093 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 4094 if (ret) 4095 pr_warn("%s: Failed to set 32-bit DMA mask.\n", 4096 mmc_hostname(mmc)); 4097 } 4098 4099 return ret; 4100 } 4101 4102 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver, 4103 const u32 *caps, const u32 *caps1) 4104 { 4105 u16 v; 4106 u64 dt_caps_mask = 0; 4107 u64 dt_caps = 0; 4108 4109 if (host->read_caps) 4110 return; 4111 4112 host->read_caps = true; 4113 4114 if (debug_quirks) 4115 host->quirks = debug_quirks; 4116 4117 if (debug_quirks2) 4118 host->quirks2 = debug_quirks2; 4119 4120 sdhci_reset_for_all(host); 4121 4122 if (host->v4_mode) 4123 sdhci_do_enable_v4_mode(host); 4124 4125 device_property_read_u64(mmc_dev(host->mmc), 4126 "sdhci-caps-mask", &dt_caps_mask); 4127 device_property_read_u64(mmc_dev(host->mmc), 4128 "sdhci-caps", &dt_caps); 4129 4130 v = ver ? 
*ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
		return;

	if (caps) {
		host->caps = *caps;
	} else {
		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
		host->caps &= ~lower_32_bits(dt_caps_mask);
		host->caps |= lower_32_bits(dt_caps);
	}

	if (host->version < SDHCI_SPEC_300)
		return;

	if (caps1) {
		host->caps1 = *caps1;
	} else {
		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->caps1 &= ~upper_32_bits(dt_caps_mask);
		host->caps1 |= upper_32_bits(dt_caps);
	}
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);

static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned int max_blocks;
	unsigned int bounce_size;
	int ret;

	/*
	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer has
	 * diminishing returns, probably because SD/MMC cards are usually
	 * optimized to handle requests of this size.
	 */
	bounce_size = SZ_64K;
	/*
	 * If the maximum request size is smaller than the default bounce
	 * buffer size, shrink the bounce buffer to match: a buffer larger
	 * than any possible request is wasted.
	 */
	if (mmc->max_req_size < bounce_size)
		bounce_size = mmc->max_req_size;
	max_blocks = bounce_size / 512;

	/*
	 * When we support just one segment, we can get significant speedups
	 * from a bounce buffer that groups scattered reads/writes together.
	 */
	host->bounce_buffer = devm_kmalloc(mmc_dev(mmc),
					   bounce_size,
					   GFP_KERNEL);
	if (!host->bounce_buffer) {
		pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
		       mmc_hostname(mmc),
		       bounce_size);
		/*
		 * Exiting with zero here makes sure we proceed with
		 * mmc->max_segs == 1.
		 */
		return;
	}

	host->bounce_addr = dma_map_single(mmc_dev(mmc),
					   host->bounce_buffer,
					   bounce_size,
					   DMA_BIDIRECTIONAL);
	ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr);
	if (ret) {
		devm_kfree(mmc_dev(mmc), host->bounce_buffer);
		host->bounce_buffer = NULL;
		/* Again fall back to max_segs == 1 */
		return;
	}

	host->bounce_buffer_size = bounce_size;

	/* Lie about this since we're bouncing */
	mmc->max_segs = max_blocks;
	mmc->max_seg_size = bounce_size;
	mmc->max_req_size = bounce_size;

	pr_info("%s: bounce up to %u segments into one, max segment size %u bytes\n",
		mmc_hostname(mmc), max_blocks, bounce_size);
}

static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
{
	/*
	 * Per SD Host Controller spec v4.10, Capabilities register bit[27],
	 * added in version 4.10, indicates 64-bit System Address support
	 * for V4 mode.
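	 * Hosts below v4.10, or hosts with V4 mode disabled, are checked
	 * against the original 64-bit System Bus Support capability bit
	 * (SDHCI_CAN_64BIT) instead.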
4227 */ 4228 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) 4229 return host->caps & SDHCI_CAN_64BIT_V4; 4230 4231 return host->caps & SDHCI_CAN_64BIT; 4232 } 4233 4234 int sdhci_setup_host(struct sdhci_host *host) 4235 { 4236 struct mmc_host *mmc; 4237 u32 max_current_caps; 4238 unsigned int ocr_avail; 4239 unsigned int override_timeout_clk; 4240 u32 max_clk; 4241 int ret = 0; 4242 bool enable_vqmmc = false; 4243 4244 WARN_ON(host == NULL); 4245 if (host == NULL) 4246 return -EINVAL; 4247 4248 mmc = host->mmc; 4249 4250 /* 4251 * If there are external regulators, get them. Note this must be done 4252 * early before resetting the host and reading the capabilities so that 4253 * the host can take the appropriate action if regulators are not 4254 * available. 4255 */ 4256 if (!mmc->supply.vqmmc) { 4257 ret = mmc_regulator_get_supply(mmc); 4258 if (ret) 4259 return ret; 4260 enable_vqmmc = true; 4261 } 4262 4263 DBG("Version: 0x%08x | Present: 0x%08x\n", 4264 sdhci_readw(host, SDHCI_HOST_VERSION), 4265 sdhci_readl(host, SDHCI_PRESENT_STATE)); 4266 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", 4267 sdhci_readl(host, SDHCI_CAPABILITIES), 4268 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 4269 4270 sdhci_read_caps(host); 4271 4272 override_timeout_clk = host->timeout_clk; 4273 4274 if (host->version > SDHCI_SPEC_420) { 4275 pr_err("%s: Unknown controller version (%d). You may experience problems.\n", 4276 mmc_hostname(mmc), host->version); 4277 } 4278 4279 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 4280 host->flags |= SDHCI_USE_SDMA; 4281 else if (!(host->caps & SDHCI_CAN_DO_SDMA)) 4282 DBG("Controller doesn't have SDMA capability\n"); 4283 else 4284 host->flags |= SDHCI_USE_SDMA; 4285 4286 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 4287 (host->flags & SDHCI_USE_SDMA)) { 4288 DBG("Disabling DMA as it is marked broken\n"); 4289 host->flags &= ~SDHCI_USE_SDMA; 4290 } 4291 4292 if ((host->version >= SDHCI_SPEC_200) && 4293 (host->caps & SDHCI_CAN_DO_ADMA2)) 4294 host->flags |= SDHCI_USE_ADMA; 4295 4296 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 4297 (host->flags & SDHCI_USE_ADMA)) { 4298 DBG("Disabling ADMA as it is marked broken\n"); 4299 host->flags &= ~SDHCI_USE_ADMA; 4300 } 4301 4302 if (sdhci_can_64bit_dma(host)) 4303 host->flags |= SDHCI_USE_64_BIT_DMA; 4304 4305 if (host->use_external_dma) { 4306 ret = sdhci_external_dma_init(host); 4307 if (ret == -EPROBE_DEFER) 4308 goto unreg; 4309 /* 4310 * Fall back to use the DMA/PIO integrated in standard SDHCI 4311 * instead of external DMA devices. 
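	 * A -EPROBE_DEFER result is propagated instead (see above), so the
	 * probe is retried once the external DMA provider turns up; any
	 * other error merely disables the external path.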
 */
		else if (ret)
			sdhci_switch_external_dma(host, false);
		/* Disable internal DMA sources */
		else
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
	}

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->set_dma_mask)
			ret = host->ops->set_dma_mask(host);
		else
			ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA unless V4 mode is set */
	if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
		host->flags &= ~SDHCI_USE_SDMA;

	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		if (!(host->flags & SDHCI_USE_64_BIT_DMA))
			host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		else if (!host->alloc_desc_sz)
			host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);

		host->desc_sz = host->alloc_desc_sz;
		host->adma_table_sz = host->adma_table_cnt * host->desc_sz;

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		/*
		 * dma_alloc_coherent() returns zeroed memory, so the
		 * reserved high 32 bits of 128-bit descriptors never need
		 * to be written.
		 */
		buf = dma_alloc_coherent(mmc_dev(mmc),
					 host->align_buffer_sz + host->adma_table_sz,
					 &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
	else
		host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);

	/*
	 * If the Clock Multiplier field reads 0, programmable clock mode is
	 * not supported; otherwise the actual clock multiplier is one more
	 * than the value of Clock Multiplier in the Capabilities Register.
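	 * For example, a Clock Multiplier field of 7 gives an effective
	 * multiplier of 8, so a 50 MHz base clock could be multiplied up
	 * to 400 MHz in programmable clock mode (illustrative figures only).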
4420 */ 4421 if (host->clk_mul) 4422 host->clk_mul += 1; 4423 4424 /* 4425 * Set host parameters. 4426 */ 4427 max_clk = host->max_clk; 4428 4429 if (host->ops->get_min_clock) 4430 mmc->f_min = host->ops->get_min_clock(host); 4431 else if (host->version >= SDHCI_SPEC_300) { 4432 if (host->clk_mul) 4433 max_clk = host->max_clk * host->clk_mul; 4434 /* 4435 * Divided Clock Mode minimum clock rate is always less than 4436 * Programmable Clock Mode minimum clock rate. 4437 */ 4438 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 4439 } else 4440 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 4441 4442 if (!mmc->f_max || mmc->f_max > max_clk) 4443 mmc->f_max = max_clk; 4444 4445 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 4446 host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps); 4447 4448 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) 4449 host->timeout_clk *= 1000; 4450 4451 if (host->timeout_clk == 0) { 4452 if (!host->ops->get_timeout_clock) { 4453 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n", 4454 mmc_hostname(mmc)); 4455 ret = -ENODEV; 4456 goto undma; 4457 } 4458 4459 host->timeout_clk = 4460 DIV_ROUND_UP(host->ops->get_timeout_clock(host), 4461 1000); 4462 } 4463 4464 if (override_timeout_clk) 4465 host->timeout_clk = override_timeout_clk; 4466 4467 mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 4468 host->ops->get_max_timeout_count(host) : 1 << 27; 4469 mmc->max_busy_timeout /= host->timeout_clk; 4470 } 4471 4472 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT && 4473 !host->ops->get_max_timeout_count) 4474 mmc->max_busy_timeout = 0; 4475 4476 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23; 4477 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 4478 4479 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 4480 host->flags |= SDHCI_AUTO_CMD12; 4481 4482 /* 4483 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO. 4484 * For v4 mode, SDMA may use Auto-CMD23 as well. 4485 */ 4486 if ((host->version >= SDHCI_SPEC_300) && 4487 ((host->flags & SDHCI_USE_ADMA) || 4488 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) && 4489 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { 4490 host->flags |= SDHCI_AUTO_CMD23; 4491 DBG("Auto-CMD23 available\n"); 4492 } else { 4493 DBG("Auto-CMD23 unavailable\n"); 4494 } 4495 4496 /* 4497 * A controller may support 8-bit width, but the board itself 4498 * might not have the pins brought out. Boards that support 4499 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 4500 * their platform code before calling sdhci_add_host(), and we 4501 * won't assume 8-bit width for hosts without that CAP. 
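	 * 4-bit width, on the other hand, is assumed below for every host
	 * unless the SDHCI_QUIRK_FORCE_1_BIT_DATA quirk marks the extra
	 * data lines as unusable.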
4502 */ 4503 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 4504 mmc->caps |= MMC_CAP_4_BIT_DATA; 4505 4506 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) 4507 mmc->caps &= ~MMC_CAP_CMD23; 4508 4509 if (host->caps & SDHCI_CAN_DO_HISPD) 4510 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 4511 4512 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 4513 mmc_card_is_removable(mmc) && 4514 mmc_gpio_get_cd(mmc) < 0) 4515 mmc->caps |= MMC_CAP_NEEDS_POLL; 4516 4517 if (!IS_ERR(mmc->supply.vqmmc)) { 4518 if (enable_vqmmc) { 4519 ret = regulator_enable(mmc->supply.vqmmc); 4520 host->sdhci_core_to_disable_vqmmc = !ret; 4521 } 4522 4523 /* If vqmmc provides no 1.8V signalling, then there's no UHS */ 4524 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, 4525 1950000)) 4526 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | 4527 SDHCI_SUPPORT_SDR50 | 4528 SDHCI_SUPPORT_DDR50); 4529 4530 /* In eMMC case vqmmc might be a fixed 1.8V regulator */ 4531 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000, 4532 3600000)) 4533 host->flags &= ~SDHCI_SIGNALING_330; 4534 4535 if (ret) { 4536 pr_warn("%s: Failed to enable vqmmc regulator: %d\n", 4537 mmc_hostname(mmc), ret); 4538 mmc->supply.vqmmc = ERR_PTR(-EINVAL); 4539 } 4540 4541 } 4542 4543 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) { 4544 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 4545 SDHCI_SUPPORT_DDR50); 4546 /* 4547 * The SDHCI controller in a SoC might support HS200/HS400 4548 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property), 4549 * but if the board is modeled such that the IO lines are not 4550 * connected to 1.8v then HS200/HS400 cannot be supported. 4551 * Disable HS200/HS400 if the board does not have 1.8v connected 4552 * to the IO lines. (Applicable for other modes in 1.8v) 4553 */ 4554 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES); 4555 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS); 4556 } 4557 4558 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ 4559 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 4560 SDHCI_SUPPORT_DDR50)) 4561 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; 4562 4563 /* SDR104 supports also implies SDR50 support */ 4564 if (host->caps1 & SDHCI_SUPPORT_SDR104) { 4565 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; 4566 /* SD3.0: SDR104 is supported so (for eMMC) the caps2 4567 * field can be promoted to support HS200. 4568 */ 4569 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) 4570 mmc->caps2 |= MMC_CAP2_HS200; 4571 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) { 4572 mmc->caps |= MMC_CAP_UHS_SDR50; 4573 } 4574 4575 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 && 4576 (host->caps1 & SDHCI_SUPPORT_HS400)) 4577 mmc->caps2 |= MMC_CAP2_HS400; 4578 4579 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) && 4580 (IS_ERR(mmc->supply.vqmmc) || 4581 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000, 4582 1300000))) 4583 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V; 4584 4585 if ((host->caps1 & SDHCI_SUPPORT_DDR50) && 4586 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) 4587 mmc->caps |= MMC_CAP_UHS_DDR50; 4588 4589 /* Does the host need tuning for SDR50? 
*/ 4590 if (host->caps1 & SDHCI_USE_SDR50_TUNING) 4591 host->flags |= SDHCI_SDR50_NEEDS_TUNING; 4592 4593 /* Driver Type(s) (A, C, D) supported by the host */ 4594 if (host->caps1 & SDHCI_DRIVER_TYPE_A) 4595 mmc->caps |= MMC_CAP_DRIVER_TYPE_A; 4596 if (host->caps1 & SDHCI_DRIVER_TYPE_C) 4597 mmc->caps |= MMC_CAP_DRIVER_TYPE_C; 4598 if (host->caps1 & SDHCI_DRIVER_TYPE_D) 4599 mmc->caps |= MMC_CAP_DRIVER_TYPE_D; 4600 4601 /* Initial value for re-tuning timer count */ 4602 host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK, 4603 host->caps1); 4604 4605 /* 4606 * In case Re-tuning Timer is not disabled, the actual value of 4607 * re-tuning timer will be 2 ^ (n - 1). 4608 */ 4609 if (host->tuning_count) 4610 host->tuning_count = 1 << (host->tuning_count - 1); 4611 4612 /* Re-tuning mode supported by the Host Controller */ 4613 host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1); 4614 4615 ocr_avail = 0; 4616 4617 /* 4618 * According to SD Host Controller spec v3.00, if the Host System 4619 * can afford more than 150mA, Host Driver should set XPC to 1. Also 4620 * the value is meaningful only if Voltage Support in the Capabilities 4621 * register is set. The actual current value is 4 times the register 4622 * value. 4623 */ 4624 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); 4625 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) { 4626 int curr = regulator_get_current_limit(mmc->supply.vmmc); 4627 if (curr > 0) { 4628 4629 /* convert to SDHCI_MAX_CURRENT format */ 4630 curr = curr/1000; /* convert to mA */ 4631 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; 4632 4633 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); 4634 max_current_caps = 4635 FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) | 4636 FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) | 4637 FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr); 4638 } 4639 } 4640 4641 if (host->caps & SDHCI_CAN_VDD_330) { 4642 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; 4643 4644 mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK, 4645 max_current_caps) * 4646 SDHCI_MAX_CURRENT_MULTIPLIER; 4647 } 4648 if (host->caps & SDHCI_CAN_VDD_300) { 4649 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; 4650 4651 mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK, 4652 max_current_caps) * 4653 SDHCI_MAX_CURRENT_MULTIPLIER; 4654 } 4655 if (host->caps & SDHCI_CAN_VDD_180) { 4656 ocr_avail |= MMC_VDD_165_195; 4657 4658 mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK, 4659 max_current_caps) * 4660 SDHCI_MAX_CURRENT_MULTIPLIER; 4661 } 4662 4663 /* If OCR set by host, use it instead. */ 4664 if (host->ocr_mask) 4665 ocr_avail = host->ocr_mask; 4666 4667 /* If OCR set by external regulators, give it highest prio. 
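	   That is, an mmc->ocr_avail already filled in by the regulator
	   layer (e.g. by mmc_regulator_get_supply()) overrides both the
	   capability-derived mask and host->ocr_mask.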
 */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note that some tuning modes impose a 4MiB limit,
	 * but 512KiB is below that anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on whether the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
		mmc->max_req_size = min_t(size_t, mmc->max_req_size,
					  dma_max_mapping_size(mmc_dev(mmc)));
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
			host->max_adma = 65532; /* 32-bit alignment */
			mmc->max_seg_size = 65535;
		} else {
			mmc->max_seg_size = 65536;
		}
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ?
1 : 65535; 4761 4762 if (mmc->max_segs == 1) 4763 /* This may alter mmc->*_blk_* parameters */ 4764 sdhci_allocate_bounce_buffer(host); 4765 4766 return 0; 4767 4768 unreg: 4769 if (host->sdhci_core_to_disable_vqmmc) 4770 regulator_disable(mmc->supply.vqmmc); 4771 undma: 4772 if (host->align_buffer) 4773 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4774 host->adma_table_sz, host->align_buffer, 4775 host->align_addr); 4776 host->adma_table = NULL; 4777 host->align_buffer = NULL; 4778 4779 return ret; 4780 } 4781 EXPORT_SYMBOL_GPL(sdhci_setup_host); 4782 4783 void sdhci_cleanup_host(struct sdhci_host *host) 4784 { 4785 struct mmc_host *mmc = host->mmc; 4786 4787 if (host->sdhci_core_to_disable_vqmmc) 4788 regulator_disable(mmc->supply.vqmmc); 4789 4790 if (host->align_buffer) 4791 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4792 host->adma_table_sz, host->align_buffer, 4793 host->align_addr); 4794 4795 if (host->use_external_dma) 4796 sdhci_external_dma_release(host); 4797 4798 host->adma_table = NULL; 4799 host->align_buffer = NULL; 4800 } 4801 EXPORT_SYMBOL_GPL(sdhci_cleanup_host); 4802 4803 int __sdhci_add_host(struct sdhci_host *host) 4804 { 4805 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI; 4806 struct mmc_host *mmc = host->mmc; 4807 int ret; 4808 4809 if ((mmc->caps2 & MMC_CAP2_CQE) && 4810 (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) { 4811 mmc->caps2 &= ~MMC_CAP2_CQE; 4812 mmc->cqe_ops = NULL; 4813 } 4814 4815 host->complete_wq = alloc_workqueue("sdhci", flags, 0); 4816 if (!host->complete_wq) 4817 return -ENOMEM; 4818 4819 INIT_WORK(&host->complete_work, sdhci_complete_work); 4820 4821 timer_setup(&host->timer, sdhci_timeout_timer, 0); 4822 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0); 4823 4824 init_waitqueue_head(&host->buf_ready_int); 4825 4826 sdhci_init(host, 0); 4827 4828 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, 4829 IRQF_SHARED, mmc_hostname(mmc), host); 4830 if (ret) { 4831 pr_err("%s: Failed to request IRQ %d: %d\n", 4832 mmc_hostname(mmc), host->irq, ret); 4833 goto unwq; 4834 } 4835 4836 ret = sdhci_led_register(host); 4837 if (ret) { 4838 pr_err("%s: Failed to register LED device: %d\n", 4839 mmc_hostname(mmc), ret); 4840 goto unirq; 4841 } 4842 4843 ret = mmc_add_host(mmc); 4844 if (ret) 4845 goto unled; 4846 4847 pr_info("%s: SDHCI controller on %s [%s] using %s\n", 4848 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), 4849 host->use_external_dma ? "External DMA" : 4850 (host->flags & SDHCI_USE_ADMA) ? 4851 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : 4852 (host->flags & SDHCI_USE_SDMA) ? 
"DMA" : "PIO"); 4853 4854 sdhci_enable_card_detection(host); 4855 4856 return 0; 4857 4858 unled: 4859 sdhci_led_unregister(host); 4860 unirq: 4861 sdhci_reset_for_all(host); 4862 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4863 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4864 free_irq(host->irq, host); 4865 unwq: 4866 destroy_workqueue(host->complete_wq); 4867 4868 return ret; 4869 } 4870 EXPORT_SYMBOL_GPL(__sdhci_add_host); 4871 4872 int sdhci_add_host(struct sdhci_host *host) 4873 { 4874 int ret; 4875 4876 ret = sdhci_setup_host(host); 4877 if (ret) 4878 return ret; 4879 4880 ret = __sdhci_add_host(host); 4881 if (ret) 4882 goto cleanup; 4883 4884 return 0; 4885 4886 cleanup: 4887 sdhci_cleanup_host(host); 4888 4889 return ret; 4890 } 4891 EXPORT_SYMBOL_GPL(sdhci_add_host); 4892 4893 void sdhci_remove_host(struct sdhci_host *host, int dead) 4894 { 4895 struct mmc_host *mmc = host->mmc; 4896 unsigned long flags; 4897 4898 if (dead) { 4899 spin_lock_irqsave(&host->lock, flags); 4900 4901 host->flags |= SDHCI_DEVICE_DEAD; 4902 4903 if (sdhci_has_requests(host)) { 4904 pr_err("%s: Controller removed during " 4905 " transfer!\n", mmc_hostname(mmc)); 4906 sdhci_error_out_mrqs(host, -ENOMEDIUM); 4907 } 4908 4909 spin_unlock_irqrestore(&host->lock, flags); 4910 } 4911 4912 sdhci_disable_card_detection(host); 4913 4914 mmc_remove_host(mmc); 4915 4916 sdhci_led_unregister(host); 4917 4918 if (!dead) 4919 sdhci_reset_for_all(host); 4920 4921 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4922 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4923 free_irq(host->irq, host); 4924 4925 del_timer_sync(&host->timer); 4926 del_timer_sync(&host->data_timer); 4927 4928 destroy_workqueue(host->complete_wq); 4929 4930 if (host->sdhci_core_to_disable_vqmmc) 4931 regulator_disable(mmc->supply.vqmmc); 4932 4933 if (host->align_buffer) 4934 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4935 host->adma_table_sz, host->align_buffer, 4936 host->align_addr); 4937 4938 if (host->use_external_dma) 4939 sdhci_external_dma_release(host); 4940 4941 host->adma_table = NULL; 4942 host->align_buffer = NULL; 4943 } 4944 4945 EXPORT_SYMBOL_GPL(sdhci_remove_host); 4946 4947 void sdhci_free_host(struct sdhci_host *host) 4948 { 4949 mmc_free_host(host->mmc); 4950 } 4951 4952 EXPORT_SYMBOL_GPL(sdhci_free_host); 4953 4954 /*****************************************************************************\ 4955 * * 4956 * Driver init/exit * 4957 * * 4958 \*****************************************************************************/ 4959 4960 static int __init sdhci_drv_init(void) 4961 { 4962 pr_info(DRIVER_NAME 4963 ": Secure Digital Host Controller Interface driver\n"); 4964 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 4965 4966 return 0; 4967 } 4968 4969 static void __exit sdhci_drv_exit(void) 4970 { 4971 } 4972 4973 module_init(sdhci_drv_init); 4974 module_exit(sdhci_drv_exit); 4975 4976 module_param(debug_quirks, uint, 0444); 4977 module_param(debug_quirks2, uint, 0444); 4978 4979 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 4980 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 4981 MODULE_LICENSE("GPL"); 4982 4983 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 4984 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 4985