// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	if (host->ops->dump_vendor_regs)
		host->ops->dump_vendor_regs(host);

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
	u16 ctrl2;

	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ctrl2 & SDHCI_CTRL_V4_MODE)
		return;

	ctrl2 |= SDHCI_CTRL_V4_MODE;
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}

/*
 * This can be called before sdhci_add_host() by a vendor's host controller
 * driver to enable v4 mode if supported.
 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
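
/*
 * A minimal usage sketch (hypothetical glue driver, not part of this file);
 * the my_* names below are illustrative assumptions only:
 *
 *	static int my_sdhci_probe(struct platform_device *pdev)
 *	{
 *		struct sdhci_host *host;
 *
 *		host = sdhci_pltfm_init(pdev, &my_sdhci_pdata, 0);
 *		if (IS_ERR(host))
 *			return PTR_ERR(host);
 *
 *		sdhci_enable_v4_mode(host);	(before sdhci_add_host())
 *
 *		return sdhci_add_host(host);
 *	}
 */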

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(mmc_dev(host->mmc));
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(mmc_dev(host->mmc));
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
			       mmc_hostname(host->mmc), (int)mask);
			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static bool sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return false;
	}

	host->ops->reset(host, mask);

	return true;
}

static void sdhci_reset_for_all(struct sdhci_host *host)
{
	if (sdhci_do_reset(host, SDHCI_RESET_ALL)) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}
		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}

enum sdhci_reset_reason {
	SDHCI_RESET_FOR_INIT,
	SDHCI_RESET_FOR_REQUEST_ERROR,
	SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY,
	SDHCI_RESET_FOR_TUNING_ABORT,
	SDHCI_RESET_FOR_CARD_REMOVED,
	SDHCI_RESET_FOR_CQE_RECOVERY,
};

static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason)
{
	switch (reason) {
	case SDHCI_RESET_FOR_INIT:
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		break;
	case SDHCI_RESET_FOR_REQUEST_ERROR:
	case SDHCI_RESET_FOR_TUNING_ABORT:
	case SDHCI_RESET_FOR_CARD_REMOVED:
	case SDHCI_RESET_FOR_CQE_RECOVERY:
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
		break;
	case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY:
		sdhci_do_reset(host, SDHCI_RESET_DATA);
		break;
	}
}

#define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r)

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_config_dma(struct sdhci_host *host)
{
	u8 ctrl;
	u16 ctrl2;

	if (host->version < SDHCI_SPEC_200)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (!(host->flags & SDHCI_REQ_USE_DMA))
		goto out;

	/* Note if DMA Select is zero then SDMA is selected */
	if (host->flags & SDHCI_USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;

	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		/*
		 * If v4 mode, all supported DMA can be 64-bit addressing if
		 * controller supports 64-bit system address, otherwise only
		 * ADMA can support 64-bit addressing.
		 */
		if (host->v4_mode) {
			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
		} else if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
			 * set SDHCI_CTRL_ADMA64.
			 */
			ctrl |= SDHCI_CTRL_ADMA64;
		}
	}

out:
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (soft)
		sdhci_reset_for(host, INIT);
	else
		sdhci_reset_for_all(host);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	spin_lock_irqsave(&host->lock, flags);
	sdhci_set_default_irqs(host);
	spin_unlock_irqrestore(&host->lock, flags);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		host->reinit_uhs = true;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);

	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);

	/*
	 * A change to the card detect bits indicates a change in present state,
	 * refer sdhci_set_card_detection(). A card detect interrupt might have
	 * been missed while the host controller was being reset, so trigger a
	 * rescan to check.
	 */
	if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return 0;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			if (host->ops->copy_to_bounce_buffer) {
				host->ops->copy_to_bounce_buffer(host,
								 data, length);
			} else {
				sg_copy_to_buffer(data->sg, data->sg_len,
						  host->bounce_buffer, length);
			}
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(mmc_dev(host->mmc),
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
			   dma_addr_t addr, int len, unsigned int cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = *desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));

	*desc += host->desc_sz;
}
EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);

static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
					   void **desc, dma_addr_t addr,
					   int len, unsigned int cmd)
{
	if (host->ops->adma_write_desc)
		host->ops->adma_write_desc(host, desc, addr, len, cmd);
	else
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			__sdhci_adma_write_desc(host, &desc, align_addr,
						offset, ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			addr += offset;
			len -= offset;
		}

		/*
		 * The block layer forces a minimum segment size of PAGE_SIZE,
		 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
		 * multiple descriptors, noting that the ADMA table is sized
		 * for 4KiB chunks anyway, so it will be big enough.
		 */
		while (len > host->max_adma) {
			int n = 32 * 1024; /* 32KiB*/

			__sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
			addr += n;
			len -= n;
		}

		/* tran, valid */
		if (len)
			__sdhci_adma_write_desc(host, &desc, addr, len,
						ADMA2_TRAN_VALID);

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
}

static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	if (host->v4_mode)
		sdhci_set_adma_addr(host, addr);
	else
		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
}

static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data;
	unsigned target_timeout, current_timeout;

	*too_big = false;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use the maximum. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return host->max_timeout_count;

	/* Unspecified command, assume max */
	if (cmd == NULL)
		return host->max_timeout_count;

	data = cmd->data;
	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return host->max_timeout_count;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count > host->max_timeout_count) {
			if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
				DBG("Too large timeout 0x%x requested for CMD%d!\n",
				    count, cmd->opcode);
			count = host->max_timeout_count;
			*too_big = true;
			break;
		}
	}

	return count;
}

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
	else
		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);

void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	bool too_big = false;
	u8 count = sdhci_calc_timeout(host, cmd, &too_big);

	if (too_big &&
	    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
		sdhci_calc_sw_timeout(host, cmd);
		sdhci_set_data_timeout_irq(host, false);
	} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
		sdhci_set_data_timeout_irq(host, true);
	}

	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
}
EXPORT_SYMBOL_GPL(__sdhci_set_timeout);

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	if (host->ops->set_timeout)
		host->ops->set_timeout(host, cmd);
	else
		__sdhci_set_timeout(host, cmd);
}

static void sdhci_initialize_data(struct sdhci_host *host,
				  struct mmc_data *data)
{
	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;
}

static inline void sdhci_set_block_info(struct sdhci_host *host,
					struct mmc_data *data)
{
	/* Set the DMA boundary value and block size */
	sdhci_writew(host,
		     SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	/*
	 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
	 * can be supported, in that case 16-bit block count register must be 0.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
	} else {
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);
			sdhci_set_adma_addr(host, host->adma_addr);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
		}
	}

	sdhci_config_dma(host);

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}

#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)

static int sdhci_external_dma_init(struct sdhci_host *host)
{
	int ret = 0;
	struct mmc_host *mmc = host->mmc;

	host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx");
	if (IS_ERR(host->tx_chan)) {
		ret = PTR_ERR(host->tx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request TX DMA channel.\n");
		host->tx_chan = NULL;
		return ret;
	}

	host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx");
	if (IS_ERR(host->rx_chan)) {
		if (host->tx_chan) {
			dma_release_channel(host->tx_chan);
			host->tx_chan = NULL;
		}

		ret = PTR_ERR(host->rx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request RX DMA channel.\n");
		host->rx_chan = NULL;
	}

	return ret;
}

static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
						   struct mmc_data *data)
{
	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
}

static int sdhci_external_dma_setup(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	int ret, i;
	enum dma_transfer_direction dir;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = cmd->data;
	struct dma_chan *chan;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;
	int sg_cnt;

	if (!host->mapbase)
		return -EINVAL;

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = host->mapbase + SDHCI_BUFFER;
	cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = data->blksz / 4;
	cfg.dst_maxburst = data->blksz / 4;

	/* Sanity check: all the SG entries must be aligned by block size. */
	for (i = 0; i < data->sg_len; i++) {
		if ((data->sg + i)->length % data->blksz)
			return -EINVAL;
	}

	chan = sdhci_external_dma_channel(host, data);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_cnt <= 0)
		return -EINVAL;

	dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	desc->callback = NULL;
	desc->callback_param = NULL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		ret = cookie;

	return ret;
}

static void sdhci_external_dma_release(struct sdhci_host *host)
{
	if (host->tx_chan) {
		dma_release_channel(host->tx_chan);
		host->tx_chan = NULL;
	}

	if (host->rx_chan) {
		dma_release_channel(host->rx_chan);
		host->rx_chan = NULL;
	}

	sdhci_switch_external_dma(host, false);
}

static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
					      struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	host->flags |= SDHCI_REQ_USE_DMA;
	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}

static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	if (!sdhci_external_dma_setup(host, cmd)) {
		__sdhci_external_dma_prepare_data(host, cmd);
	} else {
		sdhci_external_dma_release(host);
		pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
		       mmc_hostname(host->mmc));
		sdhci_prepare_data(host, cmd);
	}
}

static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	struct dma_chan *chan;

	if (!cmd->data)
		return;

	chan = sdhci_external_dma_channel(host, cmd->data);
	if (chan)
		dma_async_issue_pending(chan);
}

#else

static inline int sdhci_external_dma_init(struct sdhci_host *host)
{
	return -EOPNOTSUPP;
}

static inline void sdhci_external_dma_release(struct sdhci_host *host)
{
}

static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
						    struct mmc_command *cmd)
{
	/* This should never happen */
	WARN_ON_ONCE(1);
}

static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
}

static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
							   struct mmc_data *data)
{
	return NULL;
}

#endif

void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
{
	host->use_external_dma = en;
}
EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
}

static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
				      struct mmc_request *mrq)
{
	return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
}

static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 u16 *mode)
{
	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
			 (cmd->opcode != SD_IO_RW_EXTENDED);
	bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
	u16 ctrl2;

	/*
	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
	 * Select' is recommended rather than use of 'Auto CMD12
	 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
	 * here because some controllers (e.g. sdhci-of-dwcmshc) expect it.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (use_cmd12 || use_cmd23)) {
		*mode |= SDHCI_TRNS_AUTO_SEL;

		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (use_cmd23)
			ctrl2 |= SDHCI_CMD23_ENABLE;
		else
			ctrl2 &= ~SDHCI_CMD23_ENABLE;
		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);

		return;
	}

	/*
	 * If we are sending CMD23, CMD12 never gets sent
	 * on successful completion (so no Auto-CMD12).
	 */
	if (use_cmd12)
		*mode |= SDHCI_TRNS_AUTO_CMD12;
	else if (use_cmd23)
		*mode |= SDHCI_TRNS_AUTO_CMD23;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
		    SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			/* must not clear SDHCI_TRANSFER_MODE when tuning */
			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				     SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		sdhci_auto_cmd_select(host, cmd, &mode);
		if (sdhci_auto_cmd23(host, cmd->mrq))
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
		host->deferred_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	sdhci_set_mrq_done(host, mrq);

	sdhci_del_timer(host, mrq);

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	__sdhci_finish_mrq(host, mrq);

	queue_work(host->complete_wq, &host->complete_work);
}

static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	/*
	 * The controller needs a reset of internal state machines upon error
	 * conditions.
	 */
	if (data->error) {
		if (!host->cmd || host->cmd == data_cmd)
			sdhci_reset_for(host, REQUEST_ERROR);
		else
			sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY);
	}

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
	     data->error)) {
		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			__sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			if (!sdhci_send_command(host, data->stop)) {
				if (sw_data_timeout) {
					/*
					 * This is anyway a sw data timeout, so
					 * give up now.
					 */
					data->stop->error = -EIO;
					__sdhci_finish_mrq(host, data->mrq);
				} else {
					WARN_ON(host->deferred_cmd);
					host->deferred_cmd = data->stop;
				}
			}
		}
	} else {
		__sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	__sdhci_finish_data(host, false);
}

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
		return false;

	host->cmd = cmd;
	host->data_timeout = 0;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
		sdhci_set_timeout(host, cmd);
	}

	if (cmd->data) {
		if (host->use_external_dma)
			sdhci_external_dma_prepare_data(host, cmd);
		else
			sdhci_prepare_data(host, cmd);
	}

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		WARN_ONCE(1, "Unsupported response type!\n");
		/*
		 * This does not happen in practice because 136-bit response
		 * commands never have busy waiting, so rather than complicate
		 * the error path, just remove busy waiting and continue.
		 */
		cmd->flags &= ~MMC_RSP_BUSY;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	if (host->use_external_dma)
		sdhci_external_dma_pre_transfer(host, cmd);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);

	return true;
}

static bool sdhci_present_error(struct sdhci_host *host,
				struct mmc_command *cmd, bool present)
{
	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		cmd->error = -ENOMEDIUM;
		return true;
	}

	return false;
}

static bool sdhci_send_command_retry(struct sdhci_host *host,
				     struct mmc_command *cmd,
				     unsigned long flags)
	__releases(host->lock)
	__acquires(host->lock)
{
	struct mmc_command *deferred_cmd = host->deferred_cmd;
	int timeout = 10; /* Approx. 10 ms */
	bool present;

	while (!sdhci_send_command(host, cmd)) {
		if (!timeout--) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			return false;
		}

		spin_unlock_irqrestore(&host->lock, flags);

		usleep_range(1000, 1250);

		present = host->mmc->ops->get_cd(host->mmc);

		spin_lock_irqsave(&host->lock, flags);

		/* A deferred command might disappear, handle that */
		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
			return true;

		if (sdhci_present_error(host, cmd, present))
			return false;
	}

	if (cmd == host->deferred_cmd)
		host->deferred_cmd = NULL;

	return true;
}

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		if (!sdhci_send_command(host, cmd->mrq->cmd)) {
			WARN_ON(host->deferred_cmd);
			host->deferred_cmd = cmd->mrq->cmd;
		}
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			__sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_MMC_HS:
	case MMC_TIMING_SD_HS:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
		break;
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
			if (host->clk_mul &&
			    (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
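
/*
 * Worked example of the base-clock path above (illustrative numbers, not
 * taken from the spec): with host->max_clk = 200 MHz, no clk_mul and a
 * requested clock of 50 MHz, the SDHCI 3.00 loop stops at div = 4
 * (200 MHz / 4 <= 50 MHz), so real_div = 4, *actual_clock = 50 MHz, and the
 * value placed in the divider field is div >> 1 = 2 (the controller divides
 * the base clock by 2N).
 */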
sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, 2072 unsigned short vdd) 2073 { 2074 u8 pwr = 0; 2075 2076 if (mode != MMC_POWER_OFF) { 2077 switch (1 << vdd) { 2078 case MMC_VDD_165_195: 2079 /* 2080 * Without a regulator, SDHCI does not support 2.0v 2081 * so we only get here if the driver deliberately 2082 * added the 2.0v range to ocr_avail. Map it to 1.8v 2083 * for the purpose of turning on the power. 2084 */ 2085 case MMC_VDD_20_21: 2086 pwr = SDHCI_POWER_180; 2087 break; 2088 case MMC_VDD_29_30: 2089 case MMC_VDD_30_31: 2090 pwr = SDHCI_POWER_300; 2091 break; 2092 case MMC_VDD_32_33: 2093 case MMC_VDD_33_34: 2094 /* 2095 * 3.4 ~ 3.6V are valid only for those platforms where it's 2096 * known that the voltage range is supported by hardware. 2097 */ 2098 case MMC_VDD_34_35: 2099 case MMC_VDD_35_36: 2100 pwr = SDHCI_POWER_330; 2101 break; 2102 default: 2103 WARN(1, "%s: Invalid vdd %#x\n", 2104 mmc_hostname(host->mmc), vdd); 2105 break; 2106 } 2107 } 2108 2109 if (host->pwr == pwr) 2110 return; 2111 2112 host->pwr = pwr; 2113 2114 if (pwr == 0) { 2115 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2116 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2117 sdhci_runtime_pm_bus_off(host); 2118 } else { 2119 /* 2120 * Spec says that we should clear the power reg before setting 2121 * a new value. Some controllers don't seem to like this though. 2122 */ 2123 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 2124 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2125 2126 /* 2127 * At least the Marvell CaFe chip gets confused if we set the 2128 * voltage and turn on the power at the same time, so set the 2129 * voltage first. 2130 */ 2131 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) 2132 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2133 2134 pwr |= SDHCI_POWER_ON; 2135 2136 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2137 2138 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2139 sdhci_runtime_pm_bus_on(host); 2140 2141 /* 2142 * Some controllers need an extra 10ms delay before 2143 * they can apply the clock after applying power 2144 */ 2145 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 2146 mdelay(10); 2147 } 2148 } 2149 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg); 2150 2151 void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 2152 unsigned short vdd) 2153 { 2154 if (IS_ERR(host->mmc->supply.vmmc)) 2155 sdhci_set_power_noreg(host, mode, vdd); 2156 else 2157 sdhci_set_power_reg(host, mode, vdd); 2158 } 2159 EXPORT_SYMBOL_GPL(sdhci_set_power); 2160 2161 /* 2162 * Some controllers need to configure a valid bus voltage on their power 2163 * register regardless of whether an external regulator is taking care of power 2164 * supply. This helper function takes care of it if set as the controller's 2165 * sdhci_ops.set_power callback.
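 *
 * A minimal sketch of how a vendor driver might wire this up (the ops
 * structure name below is only an example):
 *
 *	static const struct sdhci_ops vendor_sdhci_ops = {
 *		.set_power = sdhci_set_power_and_bus_voltage,
 *	};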
2166 */ 2167 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host, 2168 unsigned char mode, 2169 unsigned short vdd) 2170 { 2171 if (!IS_ERR(host->mmc->supply.vmmc)) { 2172 struct mmc_host *mmc = host->mmc; 2173 2174 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2175 } 2176 sdhci_set_power_noreg(host, mode, vdd); 2177 } 2178 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage); 2179 2180 /*****************************************************************************\ 2181 * * 2182 * MMC callbacks * 2183 * * 2184 \*****************************************************************************/ 2185 2186 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 2187 { 2188 struct sdhci_host *host = mmc_priv(mmc); 2189 struct mmc_command *cmd; 2190 unsigned long flags; 2191 bool present; 2192 2193 /* Firstly check card presence */ 2194 present = mmc->ops->get_cd(mmc); 2195 2196 spin_lock_irqsave(&host->lock, flags); 2197 2198 sdhci_led_activate(host); 2199 2200 if (sdhci_present_error(host, mrq->cmd, present)) 2201 goto out_finish; 2202 2203 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2204 2205 if (!sdhci_send_command_retry(host, cmd, flags)) 2206 goto out_finish; 2207 2208 spin_unlock_irqrestore(&host->lock, flags); 2209 2210 return; 2211 2212 out_finish: 2213 sdhci_finish_mrq(host, mrq); 2214 spin_unlock_irqrestore(&host->lock, flags); 2215 } 2216 EXPORT_SYMBOL_GPL(sdhci_request); 2217 2218 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq) 2219 { 2220 struct sdhci_host *host = mmc_priv(mmc); 2221 struct mmc_command *cmd; 2222 unsigned long flags; 2223 int ret = 0; 2224 2225 spin_lock_irqsave(&host->lock, flags); 2226 2227 if (sdhci_present_error(host, mrq->cmd, true)) { 2228 sdhci_finish_mrq(host, mrq); 2229 goto out_finish; 2230 } 2231 2232 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2233 2234 /* 2235 * The HSQ may send a command in interrupt context without polling 2236 * the busy signaling, which means we should return BUSY if controller 2237 * has not released inhibit bits to allow HSQ trying to send request 2238 * again in non-atomic context. So we should not finish this request 2239 * here. 
2240 */ 2241 if (!sdhci_send_command(host, cmd)) 2242 ret = -EBUSY; 2243 else 2244 sdhci_led_activate(host); 2245 2246 out_finish: 2247 spin_unlock_irqrestore(&host->lock, flags); 2248 return ret; 2249 } 2250 EXPORT_SYMBOL_GPL(sdhci_request_atomic); 2251 2252 void sdhci_set_bus_width(struct sdhci_host *host, int width) 2253 { 2254 u8 ctrl; 2255 2256 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2257 if (width == MMC_BUS_WIDTH_8) { 2258 ctrl &= ~SDHCI_CTRL_4BITBUS; 2259 ctrl |= SDHCI_CTRL_8BITBUS; 2260 } else { 2261 if (host->mmc->caps & MMC_CAP_8_BIT_DATA) 2262 ctrl &= ~SDHCI_CTRL_8BITBUS; 2263 if (width == MMC_BUS_WIDTH_4) 2264 ctrl |= SDHCI_CTRL_4BITBUS; 2265 else 2266 ctrl &= ~SDHCI_CTRL_4BITBUS; 2267 } 2268 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2269 } 2270 EXPORT_SYMBOL_GPL(sdhci_set_bus_width); 2271 2272 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 2273 { 2274 u16 ctrl_2; 2275 2276 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2277 /* Select Bus Speed Mode for host */ 2278 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 2279 if ((timing == MMC_TIMING_MMC_HS200) || 2280 (timing == MMC_TIMING_UHS_SDR104)) 2281 ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 2282 else if (timing == MMC_TIMING_UHS_SDR12) 2283 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 2284 else if (timing == MMC_TIMING_UHS_SDR25) 2285 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 2286 else if (timing == MMC_TIMING_UHS_SDR50) 2287 ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 2288 else if ((timing == MMC_TIMING_UHS_DDR50) || 2289 (timing == MMC_TIMING_MMC_DDR52)) 2290 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 2291 else if (timing == MMC_TIMING_MMC_HS400) 2292 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ 2293 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2294 } 2295 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 2296 2297 static bool sdhci_timing_has_preset(unsigned char timing) 2298 { 2299 switch (timing) { 2300 case MMC_TIMING_UHS_SDR12: 2301 case MMC_TIMING_UHS_SDR25: 2302 case MMC_TIMING_UHS_SDR50: 2303 case MMC_TIMING_UHS_SDR104: 2304 case MMC_TIMING_UHS_DDR50: 2305 case MMC_TIMING_MMC_DDR52: 2306 return true; 2307 }; 2308 return false; 2309 } 2310 2311 static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing) 2312 { 2313 return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && 2314 sdhci_timing_has_preset(timing); 2315 } 2316 2317 static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios) 2318 { 2319 /* 2320 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK 2321 * Frequency. Check if preset values need to be enabled, or the Driver 2322 * Strength needs updating. Note, clock changes are handled separately. 2323 */ 2324 return !host->preset_enabled && 2325 (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type); 2326 } 2327 2328 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 2329 { 2330 struct sdhci_host *host = mmc_priv(mmc); 2331 bool reinit_uhs = host->reinit_uhs; 2332 bool turning_on_clk = false; 2333 u8 ctrl; 2334 2335 host->reinit_uhs = false; 2336 2337 if (ios->power_mode == MMC_POWER_UNDEFINED) 2338 return; 2339 2340 if (host->flags & SDHCI_DEVICE_DEAD) { 2341 if (!IS_ERR(mmc->supply.vmmc) && 2342 ios->power_mode == MMC_POWER_OFF) 2343 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 2344 return; 2345 } 2346 2347 /* 2348 * Reset the chip on each power off. 2349 * Should clear out any weird states. 
2350 */ 2351 if (ios->power_mode == MMC_POWER_OFF) { 2352 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 2353 sdhci_reinit(host); 2354 } 2355 2356 if (host->version >= SDHCI_SPEC_300 && 2357 (ios->power_mode == MMC_POWER_UP) && 2358 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) 2359 sdhci_enable_preset_value(host, false); 2360 2361 if (!ios->clock || ios->clock != host->clock) { 2362 turning_on_clk = ios->clock && !host->clock; 2363 2364 host->ops->set_clock(host, ios->clock); 2365 host->clock = ios->clock; 2366 2367 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && 2368 host->clock) { 2369 host->timeout_clk = mmc->actual_clock ? 2370 mmc->actual_clock / 1000 : 2371 host->clock / 1000; 2372 mmc->max_busy_timeout = 2373 host->ops->get_max_timeout_count ? 2374 host->ops->get_max_timeout_count(host) : 2375 1 << 27; 2376 mmc->max_busy_timeout /= host->timeout_clk; 2377 } 2378 } 2379 2380 if (host->ops->set_power) 2381 host->ops->set_power(host, ios->power_mode, ios->vdd); 2382 else 2383 sdhci_set_power(host, ios->power_mode, ios->vdd); 2384 2385 if (host->ops->platform_send_init_74_clocks) 2386 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 2387 2388 host->ops->set_bus_width(host, ios->bus_width); 2389 2390 /* 2391 * Special case to avoid multiple clock changes during voltage 2392 * switching. 2393 */ 2394 if (!reinit_uhs && 2395 turning_on_clk && 2396 host->timing == ios->timing && 2397 host->version >= SDHCI_SPEC_300 && 2398 !sdhci_presetable_values_change(host, ios)) 2399 return; 2400 2401 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2402 2403 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) { 2404 if (ios->timing == MMC_TIMING_SD_HS || 2405 ios->timing == MMC_TIMING_MMC_HS || 2406 ios->timing == MMC_TIMING_MMC_HS400 || 2407 ios->timing == MMC_TIMING_MMC_HS200 || 2408 ios->timing == MMC_TIMING_MMC_DDR52 || 2409 ios->timing == MMC_TIMING_UHS_SDR50 || 2410 ios->timing == MMC_TIMING_UHS_SDR104 || 2411 ios->timing == MMC_TIMING_UHS_DDR50 || 2412 ios->timing == MMC_TIMING_UHS_SDR25) 2413 ctrl |= SDHCI_CTRL_HISPD; 2414 else 2415 ctrl &= ~SDHCI_CTRL_HISPD; 2416 } 2417 2418 if (host->version >= SDHCI_SPEC_300) { 2419 u16 clk, ctrl_2; 2420 2421 if (!host->preset_enabled) { 2422 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2423 /* 2424 * We only need to set Driver Strength if the 2425 * preset value enable is not set. 2426 */ 2427 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2428 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; 2429 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) 2430 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; 2431 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B) 2432 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2433 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) 2434 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; 2435 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D) 2436 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D; 2437 else { 2438 pr_warn("%s: invalid driver type, default to driver type B\n", 2439 mmc_hostname(mmc)); 2440 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2441 } 2442 2443 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2444 host->drv_type = ios->drv_type; 2445 } else { 2446 /* 2447 * According to SDHC Spec v3.00, if the Preset Value 2448 * Enable in the Host Control 2 register is set, we 2449 * need to reset SD Clock Enable before changing High 2450 * Speed Enable to avoid generating clock glitches.
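 * The sequence below therefore gates SDHCI_CLOCK_CARD_EN first, updates
 * the High Speed Enable bit, and only then lets ->set_clock() turn the
 * card clock back on.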
2451 */ 2452 2453 /* Reset SD Clock Enable */ 2454 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2455 clk &= ~SDHCI_CLOCK_CARD_EN; 2456 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2457 2458 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2459 2460 /* Re-enable SD Clock */ 2461 host->ops->set_clock(host, host->clock); 2462 } 2463 2464 /* Reset SD Clock Enable */ 2465 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2466 clk &= ~SDHCI_CLOCK_CARD_EN; 2467 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2468 2469 host->ops->set_uhs_signaling(host, ios->timing); 2470 host->timing = ios->timing; 2471 2472 if (sdhci_preset_needed(host, ios->timing)) { 2473 u16 preset; 2474 2475 sdhci_enable_preset_value(host, true); 2476 preset = sdhci_get_preset_value(host); 2477 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK, 2478 preset); 2479 host->drv_type = ios->drv_type; 2480 } 2481 2482 /* Re-enable SD Clock */ 2483 host->ops->set_clock(host, host->clock); 2484 } else 2485 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2486 } 2487 EXPORT_SYMBOL_GPL(sdhci_set_ios); 2488 2489 static int sdhci_get_cd(struct mmc_host *mmc) 2490 { 2491 struct sdhci_host *host = mmc_priv(mmc); 2492 int gpio_cd = mmc_gpio_get_cd(mmc); 2493 2494 if (host->flags & SDHCI_DEVICE_DEAD) 2495 return 0; 2496 2497 /* If nonremovable, assume that the card is always present. */ 2498 if (!mmc_card_is_removable(mmc)) 2499 return 1; 2500 2501 /* 2502 * Try slot gpio detect; if defined, it takes precedence 2503 * over built-in controller functionality 2504 */ 2505 if (gpio_cd >= 0) 2506 return !!gpio_cd; 2507 2508 /* If polling, assume that the card is always present. */ 2509 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2510 return 1; 2511 2512 /* Host native card detect */ 2513 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2514 } 2515 2516 int sdhci_get_cd_nogpio(struct mmc_host *mmc) 2517 { 2518 struct sdhci_host *host = mmc_priv(mmc); 2519 unsigned long flags; 2520 int ret = 0; 2521 2522 spin_lock_irqsave(&host->lock, flags); 2523 2524 if (host->flags & SDHCI_DEVICE_DEAD) 2525 goto out; 2526 2527 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2528 out: 2529 spin_unlock_irqrestore(&host->lock, flags); 2530 2531 return ret; 2532 } 2533 EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio); 2534 2535 static int sdhci_check_ro(struct sdhci_host *host) 2536 { 2537 unsigned long flags; 2538 int is_readonly; 2539 2540 spin_lock_irqsave(&host->lock, flags); 2541 2542 if (host->flags & SDHCI_DEVICE_DEAD) 2543 is_readonly = 0; 2544 else if (host->ops->get_ro) 2545 is_readonly = host->ops->get_ro(host); 2546 else if (mmc_can_gpio_ro(host->mmc)) 2547 is_readonly = mmc_gpio_get_ro(host->mmc); 2548 else 2549 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) 2550 & SDHCI_WRITE_PROTECT); 2551 2552 spin_unlock_irqrestore(&host->lock, flags); 2553 2554 /* This quirk needs to be replaced by a callback function later */ 2555 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2556 !is_readonly : is_readonly; 2557 } 2558 2559 #define SAMPLE_COUNT 5 2560 2561 static int sdhci_get_ro(struct mmc_host *mmc) 2562 { 2563 struct sdhci_host *host = mmc_priv(mmc); 2564 int i, ro_count; 2565 2566 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) 2567 return sdhci_check_ro(host); 2568 2569 ro_count = 0; 2570 for (i = 0; i < SAMPLE_COUNT; i++) { 2571 if (sdhci_check_ro(host)) { 2572 if (++ro_count > SAMPLE_COUNT / 2) 2573 return 1; 2574 } 2575 msleep(30); 2576 } 2577 return 0; 2578 } 2579 2580 static void sdhci_hw_reset(struct mmc_host *mmc) 2581 { 2582 struct sdhci_host *host = mmc_priv(mmc); 2583 2584 if (host->ops && host->ops->hw_reset) 2585 host->ops->hw_reset(host); 2586 } 2587 2588 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 2589 { 2590 if (!(host->flags & SDHCI_DEVICE_DEAD)) { 2591 if (enable) 2592 host->ier |= SDHCI_INT_CARD_INT; 2593 else 2594 host->ier &= ~SDHCI_INT_CARD_INT; 2595 2596 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2597 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2598 } 2599 } 2600 2601 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 2602 { 2603 struct sdhci_host *host = mmc_priv(mmc); 2604 unsigned long flags; 2605 2606 if (enable) 2607 pm_runtime_get_noresume(mmc_dev(mmc)); 2608 2609 spin_lock_irqsave(&host->lock, flags); 2610 sdhci_enable_sdio_irq_nolock(host, enable); 2611 spin_unlock_irqrestore(&host->lock, flags); 2612 2613 if (!enable) 2614 pm_runtime_put_noidle(mmc_dev(mmc)); 2615 } 2616 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2617 2618 static void sdhci_ack_sdio_irq(struct mmc_host *mmc) 2619 { 2620 struct sdhci_host *host = mmc_priv(mmc); 2621 unsigned long flags; 2622 2623 spin_lock_irqsave(&host->lock, flags); 2624 sdhci_enable_sdio_irq_nolock(host, true); 2625 spin_unlock_irqrestore(&host->lock, flags); 2626 } 2627 2628 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 2629 struct mmc_ios *ios) 2630 { 2631 struct sdhci_host *host = mmc_priv(mmc); 2632 u16 ctrl; 2633 int ret; 2634 2635 /* 2636 * Signal Voltage Switching is only applicable for Host Controllers 2637 * v3.00 and above. 
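 * For older hosts we simply return success below so the core carries on
 * with the existing 3.3V signalling.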
2638 */ 2639 if (host->version < SDHCI_SPEC_300) 2640 return 0; 2641 2642 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2643 2644 switch (ios->signal_voltage) { 2645 case MMC_SIGNAL_VOLTAGE_330: 2646 if (!(host->flags & SDHCI_SIGNALING_330)) 2647 return -EINVAL; 2648 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 2649 ctrl &= ~SDHCI_CTRL_VDD_180; 2650 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2651 2652 if (!IS_ERR(mmc->supply.vqmmc)) { 2653 ret = mmc_regulator_set_vqmmc(mmc, ios); 2654 if (ret < 0) { 2655 pr_warn("%s: Switching to 3.3V signalling voltage failed\n", 2656 mmc_hostname(mmc)); 2657 return -EIO; 2658 } 2659 } 2660 /* Wait for 5ms */ 2661 usleep_range(5000, 5500); 2662 2663 /* 3.3V regulator output should be stable within 5 ms */ 2664 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2665 if (!(ctrl & SDHCI_CTRL_VDD_180)) 2666 return 0; 2667 2668 pr_warn("%s: 3.3V regulator output did not become stable\n", 2669 mmc_hostname(mmc)); 2670 2671 return -EAGAIN; 2672 case MMC_SIGNAL_VOLTAGE_180: 2673 if (!(host->flags & SDHCI_SIGNALING_180)) 2674 return -EINVAL; 2675 if (!IS_ERR(mmc->supply.vqmmc)) { 2676 ret = mmc_regulator_set_vqmmc(mmc, ios); 2677 if (ret < 0) { 2678 pr_warn("%s: Switching to 1.8V signalling voltage failed\n", 2679 mmc_hostname(mmc)); 2680 return -EIO; 2681 } 2682 } 2683 2684 /* 2685 * Enable 1.8V Signal Enable in the Host Control2 2686 * register 2687 */ 2688 ctrl |= SDHCI_CTRL_VDD_180; 2689 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2690 2691 /* Some controller need to do more when switching */ 2692 if (host->ops->voltage_switch) 2693 host->ops->voltage_switch(host); 2694 2695 /* 1.8V regulator output should be stable within 5 ms */ 2696 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2697 if (ctrl & SDHCI_CTRL_VDD_180) 2698 return 0; 2699 2700 pr_warn("%s: 1.8V regulator output did not become stable\n", 2701 mmc_hostname(mmc)); 2702 2703 return -EAGAIN; 2704 case MMC_SIGNAL_VOLTAGE_120: 2705 if (!(host->flags & SDHCI_SIGNALING_120)) 2706 return -EINVAL; 2707 if (!IS_ERR(mmc->supply.vqmmc)) { 2708 ret = mmc_regulator_set_vqmmc(mmc, ios); 2709 if (ret < 0) { 2710 pr_warn("%s: Switching to 1.2V signalling voltage failed\n", 2711 mmc_hostname(mmc)); 2712 return -EIO; 2713 } 2714 } 2715 return 0; 2716 default: 2717 /* No signal voltage switch required */ 2718 return 0; 2719 } 2720 } 2721 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch); 2722 2723 static int sdhci_card_busy(struct mmc_host *mmc) 2724 { 2725 struct sdhci_host *host = mmc_priv(mmc); 2726 u32 present_state; 2727 2728 /* Check whether DAT[0] is 0 */ 2729 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 2730 2731 return !(present_state & SDHCI_DATA_0_LVL_MASK); 2732 } 2733 2734 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 2735 { 2736 struct sdhci_host *host = mmc_priv(mmc); 2737 unsigned long flags; 2738 2739 spin_lock_irqsave(&host->lock, flags); 2740 host->flags |= SDHCI_HS400_TUNING; 2741 spin_unlock_irqrestore(&host->lock, flags); 2742 2743 return 0; 2744 } 2745 2746 void sdhci_start_tuning(struct sdhci_host *host) 2747 { 2748 u16 ctrl; 2749 2750 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2751 ctrl |= SDHCI_CTRL_EXEC_TUNING; 2752 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND) 2753 ctrl |= SDHCI_CTRL_TUNED_CLK; 2754 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2755 2756 /* 2757 * As per the Host Controller spec v3.00, tuning command 2758 * generates Buffer Read Ready interrupt, so enable that. 
2759 * 2760 * Note: The spec clearly says that when tuning sequence 2761 * is being performed, the controller does not generate 2762 * interrupts other than Buffer Read Ready interrupt. But 2763 * to make sure we don't hit a controller bug, we _only_ 2764 * enable Buffer Read Ready interrupt here. 2765 */ 2766 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 2767 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 2768 } 2769 EXPORT_SYMBOL_GPL(sdhci_start_tuning); 2770 2771 void sdhci_end_tuning(struct sdhci_host *host) 2772 { 2773 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2774 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2775 } 2776 EXPORT_SYMBOL_GPL(sdhci_end_tuning); 2777 2778 void sdhci_reset_tuning(struct sdhci_host *host) 2779 { 2780 u16 ctrl; 2781 2782 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2783 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2784 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2785 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2786 } 2787 EXPORT_SYMBOL_GPL(sdhci_reset_tuning); 2788 2789 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) 2790 { 2791 sdhci_reset_tuning(host); 2792 2793 sdhci_reset_for(host, TUNING_ABORT); 2794 2795 sdhci_end_tuning(host); 2796 2797 mmc_send_abort_tuning(host->mmc, opcode); 2798 } 2799 EXPORT_SYMBOL_GPL(sdhci_abort_tuning); 2800 2801 /* 2802 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI 2803 * tuning command does not have a data payload (or rather the hardware does it 2804 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command 2805 * interrupt setup is different to other commands and there is no timeout 2806 * interrupt so special handling is needed. 2807 */ 2808 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) 2809 { 2810 struct mmc_host *mmc = host->mmc; 2811 struct mmc_command cmd = {}; 2812 struct mmc_request mrq = {}; 2813 unsigned long flags; 2814 u32 b = host->sdma_boundary; 2815 2816 spin_lock_irqsave(&host->lock, flags); 2817 2818 cmd.opcode = opcode; 2819 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2820 cmd.mrq = &mrq; 2821 2822 mrq.cmd = &cmd; 2823 /* 2824 * In response to CMD19, the card sends 64 bytes of tuning 2825 * block to the Host Controller. So we set the block size 2826 * to 64 here. 2827 */ 2828 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && 2829 mmc->ios.bus_width == MMC_BUS_WIDTH_8) 2830 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); 2831 else 2832 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); 2833 2834 /* 2835 * The tuning block is sent by the card to the host controller. 2836 * So we set the TRNS_READ bit in the Transfer Mode register. 2837 * This also takes care of setting DMA Enable and Multi Block 2838 * Select in the same register to 0. 
2839 */ 2840 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 2841 2842 if (!sdhci_send_command_retry(host, &cmd, flags)) { 2843 spin_unlock_irqrestore(&host->lock, flags); 2844 host->tuning_done = 0; 2845 return; 2846 } 2847 2848 host->cmd = NULL; 2849 2850 sdhci_del_timer(host, &mrq); 2851 2852 host->tuning_done = 0; 2853 2854 spin_unlock_irqrestore(&host->lock, flags); 2855 2856 /* Wait for Buffer Read Ready interrupt */ 2857 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2858 msecs_to_jiffies(50)); 2859 2860 } 2861 EXPORT_SYMBOL_GPL(sdhci_send_tuning); 2862 2863 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2864 { 2865 int i; 2866 2867 /* 2868 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number 2869 * of loops reaches tuning loop count. 2870 */ 2871 for (i = 0; i < host->tuning_loop_count; i++) { 2872 u16 ctrl; 2873 2874 sdhci_send_tuning(host, opcode); 2875 2876 if (!host->tuning_done) { 2877 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", 2878 mmc_hostname(host->mmc)); 2879 sdhci_abort_tuning(host, opcode); 2880 return -ETIMEDOUT; 2881 } 2882 2883 /* Spec does not require a delay between tuning cycles */ 2884 if (host->tuning_delay > 0) 2885 mdelay(host->tuning_delay); 2886 2887 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2888 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { 2889 if (ctrl & SDHCI_CTRL_TUNED_CLK) 2890 return 0; /* Success! */ 2891 break; 2892 } 2893 2894 } 2895 2896 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2897 mmc_hostname(host->mmc)); 2898 sdhci_reset_tuning(host); 2899 return -EAGAIN; 2900 } 2901 2902 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2903 { 2904 struct sdhci_host *host = mmc_priv(mmc); 2905 int err = 0; 2906 unsigned int tuning_count = 0; 2907 bool hs400_tuning; 2908 2909 hs400_tuning = host->flags & SDHCI_HS400_TUNING; 2910 2911 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 2912 tuning_count = host->tuning_count; 2913 2914 /* 2915 * The Host Controller needs tuning in case of SDR104 and DDR50 2916 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in 2917 * the Capabilities register. 2918 * If the Host Controller supports the HS200 mode then the 2919 * tuning function has to be executed. 2920 */ 2921 switch (host->timing) { 2922 /* HS400 tuning is done in HS200 mode */ 2923 case MMC_TIMING_MMC_HS400: 2924 err = -EINVAL; 2925 goto out; 2926 2927 case MMC_TIMING_MMC_HS200: 2928 /* 2929 * Periodic re-tuning for HS400 is not expected to be needed, so 2930 * disable it here. 
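 * Clearing tuning_count below means mmc->retune_period is later set to 0,
 * so the core never schedules periodic re-tuning in this case.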
2931 */ 2932 if (hs400_tuning) 2933 tuning_count = 0; 2934 break; 2935 2936 case MMC_TIMING_UHS_SDR104: 2937 case MMC_TIMING_UHS_DDR50: 2938 break; 2939 2940 case MMC_TIMING_UHS_SDR50: 2941 if (host->flags & SDHCI_SDR50_NEEDS_TUNING) 2942 break; 2943 fallthrough; 2944 2945 default: 2946 goto out; 2947 } 2948 2949 if (host->ops->platform_execute_tuning) { 2950 err = host->ops->platform_execute_tuning(host, opcode); 2951 goto out; 2952 } 2953 2954 mmc->retune_period = tuning_count; 2955 2956 if (host->tuning_delay < 0) 2957 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; 2958 2959 sdhci_start_tuning(host); 2960 2961 host->tuning_err = __sdhci_execute_tuning(host, opcode); 2962 2963 sdhci_end_tuning(host); 2964 out: 2965 host->flags &= ~SDHCI_HS400_TUNING; 2966 2967 return err; 2968 } 2969 EXPORT_SYMBOL_GPL(sdhci_execute_tuning); 2970 2971 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2972 { 2973 /* Host Controller v3.00 defines preset value registers */ 2974 if (host->version < SDHCI_SPEC_300) 2975 return; 2976 2977 /* 2978 * We only enable or disable Preset Value if they are not already 2979 * enabled or disabled respectively. Otherwise, we bail out. 2980 */ 2981 if (host->preset_enabled != enable) { 2982 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2983 2984 if (enable) 2985 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2986 else 2987 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2988 2989 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2990 2991 if (enable) 2992 host->flags |= SDHCI_PV_ENABLED; 2993 else 2994 host->flags &= ~SDHCI_PV_ENABLED; 2995 2996 host->preset_enabled = enable; 2997 } 2998 } 2999 3000 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 3001 int err) 3002 { 3003 struct mmc_data *data = mrq->data; 3004 3005 if (data->host_cookie != COOKIE_UNMAPPED) 3006 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, 3007 mmc_get_dma_dir(data)); 3008 3009 data->host_cookie = COOKIE_UNMAPPED; 3010 } 3011 3012 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 3013 { 3014 struct sdhci_host *host = mmc_priv(mmc); 3015 3016 mrq->data->host_cookie = COOKIE_UNMAPPED; 3017 3018 /* 3019 * No pre-mapping in the pre hook if we're using the bounce buffer, 3020 * for that we would need two bounce buffers since one buffer is 3021 * in flight when this is getting called. 
3022 */ 3023 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) 3024 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 3025 } 3026 3027 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) 3028 { 3029 if (host->data_cmd) { 3030 host->data_cmd->error = err; 3031 sdhci_finish_mrq(host, host->data_cmd->mrq); 3032 } 3033 3034 if (host->cmd) { 3035 host->cmd->error = err; 3036 sdhci_finish_mrq(host, host->cmd->mrq); 3037 } 3038 } 3039 3040 static void sdhci_card_event(struct mmc_host *mmc) 3041 { 3042 struct sdhci_host *host = mmc_priv(mmc); 3043 unsigned long flags; 3044 int present; 3045 3046 /* First check if client has provided their own card event */ 3047 if (host->ops->card_event) 3048 host->ops->card_event(host); 3049 3050 present = mmc->ops->get_cd(mmc); 3051 3052 spin_lock_irqsave(&host->lock, flags); 3053 3054 /* Check sdhci_has_requests() first in case we are runtime suspended */ 3055 if (sdhci_has_requests(host) && !present) { 3056 pr_err("%s: Card removed during transfer!\n", 3057 mmc_hostname(mmc)); 3058 pr_err("%s: Resetting controller.\n", 3059 mmc_hostname(mmc)); 3060 3061 sdhci_reset_for(host, CARD_REMOVED); 3062 3063 sdhci_error_out_mrqs(host, -ENOMEDIUM); 3064 } 3065 3066 spin_unlock_irqrestore(&host->lock, flags); 3067 } 3068 3069 static const struct mmc_host_ops sdhci_ops = { 3070 .request = sdhci_request, 3071 .post_req = sdhci_post_req, 3072 .pre_req = sdhci_pre_req, 3073 .set_ios = sdhci_set_ios, 3074 .get_cd = sdhci_get_cd, 3075 .get_ro = sdhci_get_ro, 3076 .card_hw_reset = sdhci_hw_reset, 3077 .enable_sdio_irq = sdhci_enable_sdio_irq, 3078 .ack_sdio_irq = sdhci_ack_sdio_irq, 3079 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 3080 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 3081 .execute_tuning = sdhci_execute_tuning, 3082 .card_event = sdhci_card_event, 3083 .card_busy = sdhci_card_busy, 3084 }; 3085 3086 /*****************************************************************************\ 3087 * * 3088 * Request done * 3089 * * 3090 \*****************************************************************************/ 3091 3092 static bool sdhci_request_done(struct sdhci_host *host) 3093 { 3094 unsigned long flags; 3095 struct mmc_request *mrq; 3096 int i; 3097 3098 spin_lock_irqsave(&host->lock, flags); 3099 3100 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3101 mrq = host->mrqs_done[i]; 3102 if (mrq) 3103 break; 3104 } 3105 3106 if (!mrq) { 3107 spin_unlock_irqrestore(&host->lock, flags); 3108 return true; 3109 } 3110 3111 /* 3112 * The controller needs a reset of internal state machines 3113 * upon error conditions. 3114 */ 3115 if (sdhci_needs_reset(host, mrq)) { 3116 /* 3117 * Do not finish until command and data lines are available for 3118 * reset. Note there can only be one other mrq, so it cannot 3119 * also be in mrqs_done, otherwise host->cmd and host->data_cmd 3120 * would both be null. 3121 */ 3122 if (host->cmd || host->data_cmd) { 3123 spin_unlock_irqrestore(&host->lock, flags); 3124 return true; 3125 } 3126 3127 /* Some controllers need this kick or reset won't work here */ 3128 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 3129 /* This is to force an update */ 3130 host->ops->set_clock(host, host->clock); 3131 3132 sdhci_reset_for(host, REQUEST_ERROR); 3133 3134 host->pending_reset = false; 3135 } 3136 3137 /* 3138 * Always unmap the data buffers if they were mapped by 3139 * sdhci_prepare_data() whenever we finish with a request. 3140 * This avoids leaking DMA mappings on error. 
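 * Note that only buffers with host_cookie == COOKIE_MAPPED are unmapped
 * here; buffers pre-mapped in sdhci_pre_req() keep COOKIE_PRE_MAPPED and
 * are released later in sdhci_post_req().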
3141 */ 3142 if (host->flags & SDHCI_REQ_USE_DMA) { 3143 struct mmc_data *data = mrq->data; 3144 3145 if (host->use_external_dma && data && 3146 (mrq->cmd->error || data->error)) { 3147 struct dma_chan *chan = sdhci_external_dma_channel(host, data); 3148 3149 host->mrqs_done[i] = NULL; 3150 spin_unlock_irqrestore(&host->lock, flags); 3151 dmaengine_terminate_sync(chan); 3152 spin_lock_irqsave(&host->lock, flags); 3153 sdhci_set_mrq_done(host, mrq); 3154 } 3155 3156 if (data && data->host_cookie == COOKIE_MAPPED) { 3157 if (host->bounce_buffer) { 3158 /* 3159 * On reads, copy the bounced data into the 3160 * sglist 3161 */ 3162 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 3163 unsigned int length = data->bytes_xfered; 3164 3165 if (length > host->bounce_buffer_size) { 3166 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 3167 mmc_hostname(host->mmc), 3168 host->bounce_buffer_size, 3169 data->bytes_xfered); 3170 /* Cap it down and continue */ 3171 length = host->bounce_buffer_size; 3172 } 3173 dma_sync_single_for_cpu( 3174 mmc_dev(host->mmc), 3175 host->bounce_addr, 3176 host->bounce_buffer_size, 3177 DMA_FROM_DEVICE); 3178 sg_copy_from_buffer(data->sg, 3179 data->sg_len, 3180 host->bounce_buffer, 3181 length); 3182 } else { 3183 /* No copying, just switch ownership */ 3184 dma_sync_single_for_cpu( 3185 mmc_dev(host->mmc), 3186 host->bounce_addr, 3187 host->bounce_buffer_size, 3188 mmc_get_dma_dir(data)); 3189 } 3190 } else { 3191 /* Unmap the raw data */ 3192 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 3193 data->sg_len, 3194 mmc_get_dma_dir(data)); 3195 } 3196 data->host_cookie = COOKIE_UNMAPPED; 3197 } 3198 } 3199 3200 host->mrqs_done[i] = NULL; 3201 3202 spin_unlock_irqrestore(&host->lock, flags); 3203 3204 if (host->ops->request_done) 3205 host->ops->request_done(host, mrq); 3206 else 3207 mmc_request_done(host->mmc, mrq); 3208 3209 return false; 3210 } 3211 3212 static void sdhci_complete_work(struct work_struct *work) 3213 { 3214 struct sdhci_host *host = container_of(work, struct sdhci_host, 3215 complete_work); 3216 3217 while (!sdhci_request_done(host)) 3218 ; 3219 } 3220 3221 static void sdhci_timeout_timer(struct timer_list *t) 3222 { 3223 struct sdhci_host *host; 3224 unsigned long flags; 3225 3226 host = from_timer(host, t, timer); 3227 3228 spin_lock_irqsave(&host->lock, flags); 3229 3230 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { 3231 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", 3232 mmc_hostname(host->mmc)); 3233 sdhci_err_stats_inc(host, REQ_TIMEOUT); 3234 sdhci_dumpregs(host); 3235 3236 host->cmd->error = -ETIMEDOUT; 3237 sdhci_finish_mrq(host, host->cmd->mrq); 3238 } 3239 3240 spin_unlock_irqrestore(&host->lock, flags); 3241 } 3242 3243 static void sdhci_timeout_data_timer(struct timer_list *t) 3244 { 3245 struct sdhci_host *host; 3246 unsigned long flags; 3247 3248 host = from_timer(host, t, data_timer); 3249 3250 spin_lock_irqsave(&host->lock, flags); 3251 3252 if (host->data || host->data_cmd || 3253 (host->cmd && sdhci_data_line_cmd(host->cmd))) { 3254 pr_err("%s: Timeout waiting for hardware interrupt.\n", 3255 mmc_hostname(host->mmc)); 3256 sdhci_err_stats_inc(host, REQ_TIMEOUT); 3257 sdhci_dumpregs(host); 3258 3259 if (host->data) { 3260 host->data->error = -ETIMEDOUT; 3261 __sdhci_finish_data(host, true); 3262 queue_work(host->complete_wq, &host->complete_work); 3263 } else if (host->data_cmd) { 3264 host->data_cmd->error = -ETIMEDOUT; 3265 sdhci_finish_mrq(host, host->data_cmd->mrq); 3266 } else { 
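/* Otherwise a busy-signalling command (see sdhci_data_line_cmd()) timed out */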
3267 host->cmd->error = -ETIMEDOUT; 3268 sdhci_finish_mrq(host, host->cmd->mrq); 3269 } 3270 } 3271 3272 spin_unlock_irqrestore(&host->lock, flags); 3273 } 3274 3275 /*****************************************************************************\ 3276 * * 3277 * Interrupt handling * 3278 * * 3279 \*****************************************************************************/ 3280 3281 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) 3282 { 3283 /* Handle auto-CMD12 error */ 3284 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { 3285 struct mmc_request *mrq = host->data_cmd->mrq; 3286 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3287 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 3288 SDHCI_INT_DATA_TIMEOUT : 3289 SDHCI_INT_DATA_CRC; 3290 3291 /* Treat auto-CMD12 error the same as data error */ 3292 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { 3293 *intmask_p |= data_err_bit; 3294 return; 3295 } 3296 } 3297 3298 if (!host->cmd) { 3299 /* 3300 * SDHCI recovers from errors by resetting the cmd and data 3301 * circuits. Until that is done, there very well might be more 3302 * interrupts, so ignore them in that case. 3303 */ 3304 if (host->pending_reset) 3305 return; 3306 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 3307 mmc_hostname(host->mmc), (unsigned)intmask); 3308 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3309 sdhci_dumpregs(host); 3310 return; 3311 } 3312 3313 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 3314 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 3315 if (intmask & SDHCI_INT_TIMEOUT) { 3316 host->cmd->error = -ETIMEDOUT; 3317 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3318 } else { 3319 host->cmd->error = -EILSEQ; 3320 if (!mmc_op_tuning(host->cmd->opcode)) 3321 sdhci_err_stats_inc(host, CMD_CRC); 3322 } 3323 /* Treat data command CRC error the same as data CRC error */ 3324 if (host->cmd->data && 3325 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 3326 SDHCI_INT_CRC) { 3327 host->cmd = NULL; 3328 *intmask_p |= SDHCI_INT_DATA_CRC; 3329 return; 3330 } 3331 3332 __sdhci_finish_mrq(host, host->cmd->mrq); 3333 return; 3334 } 3335 3336 /* Handle auto-CMD23 error */ 3337 if (intmask & SDHCI_INT_AUTO_CMD_ERR) { 3338 struct mmc_request *mrq = host->cmd->mrq; 3339 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3340 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 
3341 -ETIMEDOUT : 3342 -EILSEQ; 3343 3344 sdhci_err_stats_inc(host, AUTO_CMD); 3345 3346 if (sdhci_auto_cmd23(host, mrq)) { 3347 mrq->sbc->error = err; 3348 __sdhci_finish_mrq(host, mrq); 3349 return; 3350 } 3351 } 3352 3353 if (intmask & SDHCI_INT_RESPONSE) 3354 sdhci_finish_command(host); 3355 } 3356 3357 static void sdhci_adma_show_error(struct sdhci_host *host) 3358 { 3359 void *desc = host->adma_table; 3360 dma_addr_t dma = host->adma_addr; 3361 3362 sdhci_dumpregs(host); 3363 3364 while (true) { 3365 struct sdhci_adma2_64_desc *dma_desc = desc; 3366 3367 if (host->flags & SDHCI_USE_64_BIT_DMA) 3368 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 3369 (unsigned long long)dma, 3370 le32_to_cpu(dma_desc->addr_hi), 3371 le32_to_cpu(dma_desc->addr_lo), 3372 le16_to_cpu(dma_desc->len), 3373 le16_to_cpu(dma_desc->cmd)); 3374 else 3375 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 3376 (unsigned long long)dma, 3377 le32_to_cpu(dma_desc->addr_lo), 3378 le16_to_cpu(dma_desc->len), 3379 le16_to_cpu(dma_desc->cmd)); 3380 3381 desc += host->desc_sz; 3382 dma += host->desc_sz; 3383 3384 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 3385 break; 3386 } 3387 } 3388 3389 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 3390 { 3391 u32 command; 3392 3393 /* 3394 * CMD19 generates _only_ a Buffer Read Ready interrupt when 3395 * sdhci_send_tuning() is used. 3396 * The case of PIO mode with mmc_send_tuning() has to be excluded here: 3397 * otherwise sdhci_transfer_pio() is never called, SDHCI_INT_DATA_AVAIL 3398 * stays asserted and we end up stuck in an irq storm. 3399 */ 3400 if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) { 3401 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); 3402 if (command == MMC_SEND_TUNING_BLOCK || 3403 command == MMC_SEND_TUNING_BLOCK_HS200) { 3404 host->tuning_done = 1; 3405 wake_up(&host->buf_ready_int); 3406 return; 3407 } 3408 } 3409 3410 if (!host->data) { 3411 struct mmc_command *data_cmd = host->data_cmd; 3412 3413 /* 3414 * The "data complete" interrupt is also used to 3415 * indicate that a busy state has ended. See comment 3416 * above in sdhci_cmd_irq(). 3417 */ 3418 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 3419 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3420 host->data_cmd = NULL; 3421 data_cmd->error = -ETIMEDOUT; 3422 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3423 __sdhci_finish_mrq(host, data_cmd->mrq); 3424 return; 3425 } 3426 if (intmask & SDHCI_INT_DATA_END) { 3427 host->data_cmd = NULL; 3428 /* 3429 * Some cards handle busy-end interrupt 3430 * before the command completed, so make 3431 * sure we do things in the proper order. 3432 */ 3433 if (host->cmd == data_cmd) 3434 return; 3435 3436 __sdhci_finish_mrq(host, data_cmd->mrq); 3437 return; 3438 } 3439 } 3440 3441 /* 3442 * SDHCI recovers from errors by resetting the cmd and data 3443 * circuits. Until that is done, there very well might be more 3444 * interrupts, so ignore them in that case.
3445 */ 3446 if (host->pending_reset) 3447 return; 3448 3449 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 3450 mmc_hostname(host->mmc), (unsigned)intmask); 3451 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3452 sdhci_dumpregs(host); 3453 3454 return; 3455 } 3456 3457 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3458 host->data->error = -ETIMEDOUT; 3459 sdhci_err_stats_inc(host, DAT_TIMEOUT); 3460 } else if (intmask & SDHCI_INT_DATA_END_BIT) { 3461 host->data->error = -EILSEQ; 3462 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3463 sdhci_err_stats_inc(host, DAT_CRC); 3464 } else if ((intmask & SDHCI_INT_DATA_CRC) && 3465 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 3466 != MMC_BUS_TEST_R) { 3467 host->data->error = -EILSEQ; 3468 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3469 sdhci_err_stats_inc(host, DAT_CRC); 3470 } else if (intmask & SDHCI_INT_ADMA_ERROR) { 3471 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), 3472 intmask); 3473 sdhci_adma_show_error(host); 3474 sdhci_err_stats_inc(host, ADMA); 3475 host->data->error = -EIO; 3476 if (host->ops->adma_workaround) 3477 host->ops->adma_workaround(host, intmask); 3478 } 3479 3480 if (host->data->error) 3481 sdhci_finish_data(host); 3482 else { 3483 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 3484 sdhci_transfer_pio(host); 3485 3486 /* 3487 * We currently don't do anything fancy with DMA 3488 * boundaries, but as we can't disable the feature 3489 * we need to at least restart the transfer. 3490 * 3491 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 3492 * should return a valid address to continue from, but as 3493 * some controllers are faulty, don't trust them. 3494 */ 3495 if (intmask & SDHCI_INT_DMA_END) { 3496 dma_addr_t dmastart, dmanow; 3497 3498 dmastart = sdhci_sdma_address(host); 3499 dmanow = dmastart + host->data->bytes_xfered; 3500 /* 3501 * Force update to the next DMA block boundary. 3502 */ 3503 dmanow = (dmanow & 3504 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 3505 SDHCI_DEFAULT_BOUNDARY_SIZE; 3506 host->data->bytes_xfered = dmanow - dmastart; 3507 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", 3508 &dmastart, host->data->bytes_xfered, &dmanow); 3509 sdhci_set_sdma_addr(host, dmanow); 3510 } 3511 3512 if (intmask & SDHCI_INT_DATA_END) { 3513 if (host->cmd == host->data_cmd) { 3514 /* 3515 * Data managed to finish before the 3516 * command completed. Make sure we do 3517 * things in the proper order. 
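 * (host->data_early set below is picked up by sdhci_finish_command(),
 * which calls sdhci_finish_data() once the command has completed.)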
3518 */ 3519 host->data_early = 1; 3520 } else { 3521 sdhci_finish_data(host); 3522 } 3523 } 3524 } 3525 } 3526 3527 static inline bool sdhci_defer_done(struct sdhci_host *host, 3528 struct mmc_request *mrq) 3529 { 3530 struct mmc_data *data = mrq->data; 3531 3532 return host->pending_reset || host->always_defer_done || 3533 ((host->flags & SDHCI_REQ_USE_DMA) && data && 3534 data->host_cookie == COOKIE_MAPPED); 3535 } 3536 3537 static irqreturn_t sdhci_irq(int irq, void *dev_id) 3538 { 3539 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0}; 3540 irqreturn_t result = IRQ_NONE; 3541 struct sdhci_host *host = dev_id; 3542 u32 intmask, mask, unexpected = 0; 3543 int max_loops = 16; 3544 int i; 3545 3546 spin_lock(&host->lock); 3547 3548 if (host->runtime_suspended) { 3549 spin_unlock(&host->lock); 3550 return IRQ_NONE; 3551 } 3552 3553 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3554 if (!intmask || intmask == 0xffffffff) { 3555 result = IRQ_NONE; 3556 goto out; 3557 } 3558 3559 do { 3560 DBG("IRQ status 0x%08x\n", intmask); 3561 3562 if (host->ops->irq) { 3563 intmask = host->ops->irq(host, intmask); 3564 if (!intmask) 3565 goto cont; 3566 } 3567 3568 /* Clear selected interrupts. */ 3569 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3570 SDHCI_INT_BUS_POWER); 3571 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3572 3573 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3574 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 3575 SDHCI_CARD_PRESENT; 3576 3577 /* 3578 * It has been observed on i.MX eSDHC that the INSERT 3579 * bit is immediately set again when it gets 3580 * cleared, if a card is inserted. We have to mask 3581 * the irq to prevent an interrupt storm which would 3582 * freeze the system. The REMOVE bit behaves the 3583 * same way. 3584 * 3585 * More testing is needed here to ensure it works 3586 * for other platforms though. 3587 */ 3588 host->ier &= ~(SDHCI_INT_CARD_INSERT | 3589 SDHCI_INT_CARD_REMOVE); 3590 host->ier |= present ?
SDHCI_INT_CARD_REMOVE : 3591 SDHCI_INT_CARD_INSERT; 3592 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3593 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3594 3595 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 3596 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 3597 3598 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 3599 SDHCI_INT_CARD_REMOVE); 3600 result = IRQ_WAKE_THREAD; 3601 } 3602 3603 if (intmask & SDHCI_INT_CMD_MASK) 3604 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask); 3605 3606 if (intmask & SDHCI_INT_DATA_MASK) 3607 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 3608 3609 if (intmask & SDHCI_INT_BUS_POWER) 3610 pr_err("%s: Card is consuming too much power!\n", 3611 mmc_hostname(host->mmc)); 3612 3613 if (intmask & SDHCI_INT_RETUNE) 3614 mmc_retune_needed(host->mmc); 3615 3616 if ((intmask & SDHCI_INT_CARD_INT) && 3617 (host->ier & SDHCI_INT_CARD_INT)) { 3618 sdhci_enable_sdio_irq_nolock(host, false); 3619 sdio_signal_irq(host->mmc); 3620 } 3621 3622 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 3623 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3624 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 3625 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); 3626 3627 if (intmask) { 3628 unexpected |= intmask; 3629 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3630 } 3631 cont: 3632 if (result == IRQ_NONE) 3633 result = IRQ_HANDLED; 3634 3635 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3636 } while (intmask && --max_loops); 3637 3638 /* Determine if mrqs can be completed immediately */ 3639 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3640 struct mmc_request *mrq = host->mrqs_done[i]; 3641 3642 if (!mrq) 3643 continue; 3644 3645 if (sdhci_defer_done(host, mrq)) { 3646 result = IRQ_WAKE_THREAD; 3647 } else { 3648 mrqs_done[i] = mrq; 3649 host->mrqs_done[i] = NULL; 3650 } 3651 } 3652 out: 3653 if (host->deferred_cmd) 3654 result = IRQ_WAKE_THREAD; 3655 3656 spin_unlock(&host->lock); 3657 3658 /* Process mrqs ready for immediate completion */ 3659 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3660 if (!mrqs_done[i]) 3661 continue; 3662 3663 if (host->ops->request_done) 3664 host->ops->request_done(host, mrqs_done[i]); 3665 else 3666 mmc_request_done(host->mmc, mrqs_done[i]); 3667 } 3668 3669 if (unexpected) { 3670 pr_err("%s: Unexpected interrupt 0x%08x.\n", 3671 mmc_hostname(host->mmc), unexpected); 3672 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3673 sdhci_dumpregs(host); 3674 } 3675 3676 return result; 3677 } 3678 3679 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3680 { 3681 struct sdhci_host *host = dev_id; 3682 struct mmc_command *cmd; 3683 unsigned long flags; 3684 u32 isr; 3685 3686 while (!sdhci_request_done(host)) 3687 ; 3688 3689 spin_lock_irqsave(&host->lock, flags); 3690 3691 isr = host->thread_isr; 3692 host->thread_isr = 0; 3693 3694 cmd = host->deferred_cmd; 3695 if (cmd && !sdhci_send_command_retry(host, cmd, flags)) 3696 sdhci_finish_mrq(host, cmd->mrq); 3697 3698 spin_unlock_irqrestore(&host->lock, flags); 3699 3700 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3701 struct mmc_host *mmc = host->mmc; 3702 3703 mmc->ops->card_event(mmc); 3704 mmc_detect_change(mmc, msecs_to_jiffies(200)); 3705 } 3706 3707 return IRQ_HANDLED; 3708 } 3709 3710 /*****************************************************************************\ 3711 * * 3712 * Suspend/resume * 3713 * * 3714 \*****************************************************************************/ 3715 3716 #ifdef CONFIG_PM 3717 3718 static bool 
sdhci_cd_irq_can_wakeup(struct sdhci_host *host) 3719 { 3720 return mmc_card_is_removable(host->mmc) && 3721 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3722 !mmc_can_gpio_cd(host->mmc); 3723 } 3724 3725 /* 3726 * To enable wakeup events, the corresponding events have to be enabled in 3727 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal 3728 * Table' in the SD Host Controller Standard Specification. 3729 * It is useless to restore SDHCI_INT_ENABLE state in 3730 * sdhci_disable_irq_wakeups() since it will be set by 3731 * sdhci_enable_card_detection() or sdhci_init(). 3732 */ 3733 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) 3734 { 3735 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | 3736 SDHCI_WAKE_ON_INT; 3737 u32 irq_val = 0; 3738 u8 wake_val = 0; 3739 u8 val; 3740 3741 if (sdhci_cd_irq_can_wakeup(host)) { 3742 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; 3743 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; 3744 } 3745 3746 if (mmc_card_wake_sdio_irq(host->mmc)) { 3747 wake_val |= SDHCI_WAKE_ON_INT; 3748 irq_val |= SDHCI_INT_CARD_INT; 3749 } 3750 3751 if (!irq_val) 3752 return false; 3753 3754 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3755 val &= ~mask; 3756 val |= wake_val; 3757 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3758 3759 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); 3760 3761 host->irq_wake_enabled = !enable_irq_wake(host->irq); 3762 3763 return host->irq_wake_enabled; 3764 } 3765 3766 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 3767 { 3768 u8 val; 3769 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 3770 | SDHCI_WAKE_ON_INT; 3771 3772 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3773 val &= ~mask; 3774 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3775 3776 disable_irq_wake(host->irq); 3777 3778 host->irq_wake_enabled = false; 3779 } 3780 3781 int sdhci_suspend_host(struct sdhci_host *host) 3782 { 3783 sdhci_disable_card_detection(host); 3784 3785 mmc_retune_timer_stop(host->mmc); 3786 3787 if (!device_may_wakeup(mmc_dev(host->mmc)) || 3788 !sdhci_enable_irq_wakeups(host)) { 3789 host->ier = 0; 3790 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3791 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3792 free_irq(host->irq, host); 3793 } 3794 3795 return 0; 3796 } 3797 3798 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 3799 3800 int sdhci_resume_host(struct sdhci_host *host) 3801 { 3802 struct mmc_host *mmc = host->mmc; 3803 int ret = 0; 3804 3805 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3806 if (host->ops->enable_dma) 3807 host->ops->enable_dma(host); 3808 } 3809 3810 if ((mmc->pm_flags & MMC_PM_KEEP_POWER) && 3811 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 3812 /* Card keeps power but host controller does not */ 3813 sdhci_init(host, 0); 3814 host->pwr = 0; 3815 host->clock = 0; 3816 host->reinit_uhs = true; 3817 mmc->ops->set_ios(mmc, &mmc->ios); 3818 } else { 3819 sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER)); 3820 } 3821 3822 if (host->irq_wake_enabled) { 3823 sdhci_disable_irq_wakeups(host); 3824 } else { 3825 ret = request_threaded_irq(host->irq, sdhci_irq, 3826 sdhci_thread_irq, IRQF_SHARED, 3827 mmc_hostname(mmc), host); 3828 if (ret) 3829 return ret; 3830 } 3831 3832 sdhci_enable_card_detection(host); 3833 3834 return ret; 3835 } 3836 3837 EXPORT_SYMBOL_GPL(sdhci_resume_host); 3838 3839 int sdhci_runtime_suspend_host(struct sdhci_host *host) 3840 { 3841 unsigned long flags; 3842 3843 mmc_retune_timer_stop(host->mmc); 3844 3845 
spin_lock_irqsave(&host->lock, flags); 3846 host->ier &= SDHCI_INT_CARD_INT; 3847 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3848 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3849 spin_unlock_irqrestore(&host->lock, flags); 3850 3851 synchronize_hardirq(host->irq); 3852 3853 spin_lock_irqsave(&host->lock, flags); 3854 host->runtime_suspended = true; 3855 spin_unlock_irqrestore(&host->lock, flags); 3856 3857 return 0; 3858 } 3859 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 3860 3861 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) 3862 { 3863 struct mmc_host *mmc = host->mmc; 3864 unsigned long flags; 3865 int host_flags = host->flags; 3866 3867 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3868 if (host->ops->enable_dma) 3869 host->ops->enable_dma(host); 3870 } 3871 3872 sdhci_init(host, soft_reset); 3873 3874 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && 3875 mmc->ios.power_mode != MMC_POWER_OFF) { 3876 /* Force clock and power re-program */ 3877 host->pwr = 0; 3878 host->clock = 0; 3879 host->reinit_uhs = true; 3880 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); 3881 mmc->ops->set_ios(mmc, &mmc->ios); 3882 3883 if ((host_flags & SDHCI_PV_ENABLED) && 3884 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { 3885 spin_lock_irqsave(&host->lock, flags); 3886 sdhci_enable_preset_value(host, true); 3887 spin_unlock_irqrestore(&host->lock, flags); 3888 } 3889 3890 if ((mmc->caps2 & MMC_CAP2_HS400_ES) && 3891 mmc->ops->hs400_enhanced_strobe) 3892 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); 3893 } 3894 3895 spin_lock_irqsave(&host->lock, flags); 3896 3897 host->runtime_suspended = false; 3898 3899 /* Enable SDIO IRQ */ 3900 if (sdio_irq_claimed(mmc)) 3901 sdhci_enable_sdio_irq_nolock(host, true); 3902 3903 /* Enable Card Detection */ 3904 sdhci_enable_card_detection(host); 3905 3906 spin_unlock_irqrestore(&host->lock, flags); 3907 3908 return 0; 3909 } 3910 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 3911 3912 #endif /* CONFIG_PM */ 3913 3914 /*****************************************************************************\ 3915 * * 3916 * Command Queue Engine (CQE) helpers * 3917 * * 3918 \*****************************************************************************/ 3919 3920 void sdhci_cqe_enable(struct mmc_host *mmc) 3921 { 3922 struct sdhci_host *host = mmc_priv(mmc); 3923 unsigned long flags; 3924 u8 ctrl; 3925 3926 spin_lock_irqsave(&host->lock, flags); 3927 3928 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 3929 ctrl &= ~SDHCI_CTRL_DMA_MASK; 3930 /* 3931 * Host from V4.10 supports ADMA3 DMA type. 3932 * ADMA3 performs integrated descriptor which is more suitable 3933 * for cmd queuing to fetch both command and transfer descriptors. 
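 * ADMA3 is only selected below when v4 mode is enabled and the host
 * advertises SDHCI_CAN_DO_ADMA3 in the Capabilities 1 register.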
3934 */ 3935 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) 3936 ctrl |= SDHCI_CTRL_ADMA3; 3937 else if (host->flags & SDHCI_USE_64_BIT_DMA) 3938 ctrl |= SDHCI_CTRL_ADMA64; 3939 else 3940 ctrl |= SDHCI_CTRL_ADMA32; 3941 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 3942 3943 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), 3944 SDHCI_BLOCK_SIZE); 3945 3946 /* Set maximum timeout */ 3947 sdhci_set_timeout(host, NULL); 3948 3949 host->ier = host->cqe_ier; 3950 3951 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3952 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3953 3954 host->cqe_on = true; 3955 3956 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", 3957 mmc_hostname(mmc), host->ier, 3958 sdhci_readl(host, SDHCI_INT_STATUS)); 3959 3960 spin_unlock_irqrestore(&host->lock, flags); 3961 } 3962 EXPORT_SYMBOL_GPL(sdhci_cqe_enable); 3963 3964 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) 3965 { 3966 struct sdhci_host *host = mmc_priv(mmc); 3967 unsigned long flags; 3968 3969 spin_lock_irqsave(&host->lock, flags); 3970 3971 sdhci_set_default_irqs(host); 3972 3973 host->cqe_on = false; 3974 3975 if (recovery) 3976 sdhci_reset_for(host, CQE_RECOVERY); 3977 3978 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", 3979 mmc_hostname(mmc), host->ier, 3980 sdhci_readl(host, SDHCI_INT_STATUS)); 3981 3982 spin_unlock_irqrestore(&host->lock, flags); 3983 } 3984 EXPORT_SYMBOL_GPL(sdhci_cqe_disable); 3985 3986 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, 3987 int *data_error) 3988 { 3989 u32 mask; 3990 3991 if (!host->cqe_on) 3992 return false; 3993 3994 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) { 3995 *cmd_error = -EILSEQ; 3996 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3997 sdhci_err_stats_inc(host, CMD_CRC); 3998 } else if (intmask & SDHCI_INT_TIMEOUT) { 3999 *cmd_error = -ETIMEDOUT; 4000 sdhci_err_stats_inc(host, CMD_TIMEOUT); 4001 } else 4002 *cmd_error = 0; 4003 4004 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) { 4005 *data_error = -EILSEQ; 4006 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 4007 sdhci_err_stats_inc(host, DAT_CRC); 4008 } else if (intmask & SDHCI_INT_DATA_TIMEOUT) { 4009 *data_error = -ETIMEDOUT; 4010 sdhci_err_stats_inc(host, DAT_TIMEOUT); 4011 } else if (intmask & SDHCI_INT_ADMA_ERROR) { 4012 *data_error = -EIO; 4013 sdhci_err_stats_inc(host, ADMA); 4014 } else 4015 *data_error = 0; 4016 4017 /* Clear selected interrupts. 
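Only the bits covered by host->cqe_ier are acknowledged here; anything left over is cleared and reported as unexpected further down.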
	 */
	mask = intmask & host->cqe_ier;
	sdhci_writel(host, mask, SDHCI_INT_STATUS);

	if (intmask & SDHCI_INT_BUS_POWER)
		pr_err("%s: Card is consuming too much power!\n",
		       mmc_hostname(host->mmc));

	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
	if (intmask) {
		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), intmask);
		sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
		sdhci_dumpregs(host);
	}

	return true;
}
EXPORT_SYMBOL_GPL(sdhci_cqe_irq);

/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/

struct sdhci_host *sdhci_alloc_host(struct device *dev,
				    size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	host->flags = SDHCI_SIGNALING_330;

	host->cqe_ier = SDHCI_CQE_INT_MASK;
	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;

	host->tuning_delay = -1;
	host->tuning_loop_count = MAX_TUNING_LOOP;

	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;

	/*
	 * The DMA table descriptor count is calculated as the maximum
	 * number of segments times 2, to allow for an alignment
	 * descriptor for each segment, plus 1 for a nop end descriptor.
	 */
	host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
	host->max_adma = 65536;

	host->max_timeout_count = 0xE;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);

static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int ret = -EINVAL;

	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* Try 64-bit mask if hardware is capable of it */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* 32-bit mask as default & fallback */
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return ret;
}

void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
		       const u32 *caps, const u32 *caps1)
{
	u16 v;
	u64 dt_caps_mask = 0;
	u64 dt_caps = 0;

	if (host->read_caps)
		return;

	host->read_caps = true;

	if (debug_quirks)
		host->quirks = debug_quirks;

	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	sdhci_reset_for_all(host);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	device_property_read_u64(mmc_dev(host->mmc),
				 "sdhci-caps-mask", &dt_caps_mask);
	device_property_read_u64(mmc_dev(host->mmc),
				 "sdhci-caps", &dt_caps);

	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
		return;

	if (caps) {
		host->caps = *caps;
	} else {
		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
		host->caps &= ~lower_32_bits(dt_caps_mask);
		host->caps |= lower_32_bits(dt_caps);
	}

	if (host->version < SDHCI_SPEC_300)
		return;

	if (caps1) {
		host->caps1 = *caps1;
	} else {
		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->caps1 &= ~upper_32_bits(dt_caps_mask);
		host->caps1 |= upper_32_bits(dt_caps);
	}
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);

static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned int max_blocks;
	unsigned int bounce_size;
	int ret;

	/*
	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
	 * has diminishing returns; this is probably because SD/MMC
	 * cards are usually optimized to handle requests of this size.
	 */
	bounce_size = SZ_64K;
	/*
	 * Adjust downwards to maximum request size if this is less
	 * than our segment size, else hammer down the maximum
	 * request size to the maximum buffer size.
	 */
	if (mmc->max_req_size < bounce_size)
		bounce_size = mmc->max_req_size;
	max_blocks = bounce_size / 512;

	/*
	 * When we support just one segment, we can get significant
	 * speedups by using a bounce buffer to group scattered
	 * reads/writes together.
	 */
	host->bounce_buffer = devm_kmalloc(mmc_dev(mmc),
					   bounce_size,
					   GFP_KERNEL);
	if (!host->bounce_buffer) {
		pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
		       mmc_hostname(mmc),
		       bounce_size);
		/*
		 * Exiting with zero here makes sure we proceed with
		 * mmc->max_segs == 1.
		 */
		return;
	}

	host->bounce_addr = dma_map_single(mmc_dev(mmc),
					   host->bounce_buffer,
					   bounce_size,
					   DMA_BIDIRECTIONAL);
	ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr);
	if (ret) {
		devm_kfree(mmc_dev(mmc), host->bounce_buffer);
		host->bounce_buffer = NULL;
		/* Again fall back to max_segs == 1 */
		return;
	}

	host->bounce_buffer_size = bounce_size;

	/* Lie about this since we're bouncing */
	mmc->max_segs = max_blocks;
	mmc->max_seg_size = bounce_size;
	mmc->max_req_size = bounce_size;

	pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
		mmc_hostname(mmc), max_blocks, bounce_size);
}

static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
{
	/*
	 * According to SD Host Controller spec v4.10, bit[27] added from
	 * version 4.10 in Capabilities Register is used as 64-bit System
	 * Address support for V4 mode.
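	 * For controllers older than v4.10, or when v4 mode is not enabled,
	 * the original 64-bit System Bus Support bit (SDHCI_CAN_64BIT) is
	 * checked instead.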
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
		return host->caps & SDHCI_CAN_64BIT_V4;

	return host->caps & SDHCI_CAN_64BIT;
}

int sdhci_setup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret = 0;
	bool enable_vqmmc = false;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	/*
	 * If there are external regulators, get them. Note this must be done
	 * early before resetting the host and reading the capabilities so
	 * that the host can take the appropriate action if regulators are
	 * not available.
	 */
	if (!mmc->supply.vqmmc) {
		ret = mmc_regulator_get_supply(mmc);
		if (ret)
			return ret;
		enable_vqmmc = true;
	}

	DBG("Version: 0x%08x | Present: 0x%08x\n",
	    sdhci_readw(host, SDHCI_HOST_VERSION),
	    sdhci_readl(host, SDHCI_PRESENT_STATE));
	DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
	    sdhci_readl(host, SDHCI_CAPABILITIES),
	    sdhci_readl(host, SDHCI_CAPABILITIES_1));

	sdhci_read_caps(host);

	override_timeout_clk = host->timeout_clk;

	if (host->version > SDHCI_SPEC_420) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
	    (host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
	    (host->caps & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
	    (host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	if (sdhci_can_64bit_dma(host))
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->use_external_dma) {
		ret = sdhci_external_dma_init(host);
		if (ret == -EPROBE_DEFER)
			goto unreg;
		/*
		 * Fall back to use the DMA/PIO integrated in standard SDHCI
		 * instead of external DMA devices.
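		 * Only -EPROBE_DEFER aborts setup here; any other failure
		 * from sdhci_external_dma_init() simply switches the host
		 * back to its internal DMA/PIO support.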
		 */
		else if (ret)
			sdhci_switch_external_dma(host, false);
		/* Disable internal DMA sources */
		else
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
	}

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->set_dma_mask)
			ret = host->ops->set_dma_mask(host);
		else
			ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA if v4 mode not set */
	if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
		host->flags &= ~SDHCI_USE_SDMA;

	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		if (!(host->flags & SDHCI_USE_64_BIT_DMA))
			host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		else if (!host->alloc_desc_sz)
			host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);

		host->desc_sz = host->alloc_desc_sz;
		host->adma_table_sz = host->adma_table_cnt * host->desc_sz;

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		/*
		 * Use zalloc to zero the reserved high 32-bits of 128-bit
		 * descriptors so that they never need to be written.
		 */
		buf = dma_alloc_coherent(mmc_dev(mmc),
					 host->align_buffer_sz + host->adma_table_sz,
					 &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
	else
		host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
	    SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
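	 * host->clk_mul therefore stays 0 when programmable clock mode is
	 * unavailable; when non-zero, the incremented value is used below to
	 * scale the maximum programmable clock rate.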
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul)
			max_clk = host->max_clk * host->clk_mul;
		/*
		 * Divided Clock Mode minimum clock rate is always less than
		 * Programmable Clock Mode minimum clock rate.
		 */
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;

	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);

		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (host->timeout_clk == 0) {
			if (!host->ops->get_timeout_clock) {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
				       mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}

			host->timeout_clk =
				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
					     1000);
		}

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}

	if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
	    !host->ops->get_max_timeout_count)
		mmc->max_busy_timeout = 0;

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/*
	 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
	 * For v4 mode, SDMA may use Auto-CMD23 as well.
	 */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
	    !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("Auto-CMD23 available\n");
	} else {
		DBG("Auto-CMD23 unavailable\n");
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out. Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
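	 *
	 * 4-bit width, on the other hand, is assumed for every host unless
	 * the SDHCI_QUIRK_FORCE_1_BIT_DATA quirk is set.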
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		if (enable_vqmmc) {
			ret = regulator_enable(mmc->supply.vqmmc);
			host->sdhci_core_to_disable_vqmmc = !ret;
		}

		/* If vqmmc provides no 1.8V signalling, then there's no UHS */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);

		/* In eMMC case vqmmc might be a fixed 1.8V regulator */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
						    3600000))
			host->flags &= ~SDHCI_SIGNALING_330;

		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}

	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
		/*
		 * The SDHCI controller in a SoC might support HS200/HS400
		 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
		 * but if the board is designed such that the IO lines are not
		 * connected to 1.8V then HS200/HS400 cannot be supported.
		 * Disable HS200/HS400 if the board does not have 1.8V
		 * connected to the IO lines. (Applies to the other 1.8V
		 * modes as well.)
		 */
		mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
		mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/*
	 * Does the host need tuning for SDR50?
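	 * If the Use Tuning for SDR50 capability bit is set, flag the host
	 * so that the tuning procedure also runs when operating in SDR50
	 * mode.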
	 */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
				       host->caps1);

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);

	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
				FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
				FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
		}
	}

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
						 max_current_caps) *
				       SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
						 max_current_caps) *
				       SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
						 max_current_caps) *
				       SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/*
	 * If OCR set by external regulators, give it highest prio.
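	 * mmc->ocr_avail may already have been filled in (typically from the
	 * vmmc regulator when the supplies were acquired above), in which
	 * case it takes precedence over both the capability bits and
	 * host->ocr_mask.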
	 */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
		mmc->max_req_size = min_t(size_t, mmc->max_req_size,
					  dma_max_mapping_size(mmc_dev(mmc)));
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
			host->max_adma = 65532; /* 32-bit alignment */
			mmc->max_seg_size = 65535;
		} else {
			mmc->max_seg_size = 65536;
		}
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				    SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	if (mmc->max_segs == 1)
		/* This may alter mmc->*_blk_* parameters */
		sdhci_allocate_bounce_buffer(host);

	return 0;

unreg:
	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	if (host->use_external_dma)
		sdhci_external_dma_release(host);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);

int __sdhci_add_host(struct sdhci_host *host)
{
	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
	struct mmc_host *mmc = host->mmc;
	int ret;

	if ((mmc->caps2 & MMC_CAP2_CQE) &&
	    (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
		mmc->caps2 &= ~MMC_CAP2_CQE;
		mmc->cqe_ops = NULL;
	}

	host->complete_wq = alloc_workqueue("sdhci", flags, 0);
	if (!host->complete_wq)
		return -ENOMEM;

	INIT_WORK(&host->complete_work, sdhci_complete_work);

	timer_setup(&host->timer, sdhci_timeout_timer, 0);
	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto unwq;
	}

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		host->use_external_dma ? "External DMA" :
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_reset_for_all(host);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
unwq:
	destroy_workqueue(host->complete_wq);

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);

int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);

void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_reset_for_all(host);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	destroy_workqueue(host->complete_wq);

	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	if (host->use_external_dma)
		sdhci_external_dma_release(host);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");