1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver 4 * 5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 6 * 7 * Thanks to the following companies for their support: 8 * 9 * - JMicron (hardware and technical support) 10 */ 11 12 #include <linux/bitfield.h> 13 #include <linux/delay.h> 14 #include <linux/dmaengine.h> 15 #include <linux/ktime.h> 16 #include <linux/highmem.h> 17 #include <linux/io.h> 18 #include <linux/module.h> 19 #include <linux/dma-mapping.h> 20 #include <linux/slab.h> 21 #include <linux/scatterlist.h> 22 #include <linux/sizes.h> 23 #include <linux/regulator/consumer.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/of.h> 26 27 #include <linux/leds.h> 28 29 #include <linux/mmc/mmc.h> 30 #include <linux/mmc/host.h> 31 #include <linux/mmc/card.h> 32 #include <linux/mmc/sdio.h> 33 #include <linux/mmc/slot-gpio.h> 34 35 #include "sdhci.h" 36 37 #define DRIVER_NAME "sdhci" 38 39 #define DBG(f, x...) \ 40 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 41 42 #define SDHCI_DUMP(f, x...) \ 43 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 44 45 #define MAX_TUNING_LOOP 40 46 47 static unsigned int debug_quirks = 0; 48 static unsigned int debug_quirks2; 49 50 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); 51 52 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd); 53 54 void sdhci_dumpregs(struct sdhci_host *host) 55 { 56 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n"); 57 58 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n", 59 sdhci_readl(host, SDHCI_DMA_ADDRESS), 60 sdhci_readw(host, SDHCI_HOST_VERSION)); 61 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n", 62 sdhci_readw(host, SDHCI_BLOCK_SIZE), 63 sdhci_readw(host, SDHCI_BLOCK_COUNT)); 64 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n", 65 sdhci_readl(host, SDHCI_ARGUMENT), 66 sdhci_readw(host, SDHCI_TRANSFER_MODE)); 67 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n", 68 sdhci_readl(host, SDHCI_PRESENT_STATE), 69 sdhci_readb(host, SDHCI_HOST_CONTROL)); 70 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n", 71 sdhci_readb(host, SDHCI_POWER_CONTROL), 72 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); 73 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n", 74 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), 75 sdhci_readw(host, SDHCI_CLOCK_CONTROL)); 76 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n", 77 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), 78 sdhci_readl(host, SDHCI_INT_STATUS)); 79 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n", 80 sdhci_readl(host, SDHCI_INT_ENABLE), 81 sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); 82 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n", 83 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS), 84 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 85 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n", 86 sdhci_readl(host, SDHCI_CAPABILITIES), 87 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 88 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n", 89 sdhci_readw(host, SDHCI_COMMAND), 90 sdhci_readl(host, SDHCI_MAX_CURRENT)); 91 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n", 92 sdhci_readl(host, SDHCI_RESPONSE), 93 sdhci_readl(host, SDHCI_RESPONSE + 4)); 94 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n", 95 sdhci_readl(host, SDHCI_RESPONSE + 8), 96 sdhci_readl(host, SDHCI_RESPONSE + 12)); 97 SDHCI_DUMP("Host ctl2: 0x%08x\n", 98 sdhci_readw(host, SDHCI_HOST_CONTROL2)); 99 100 if (host->flags & SDHCI_USE_ADMA) { 101 if (host->flags & SDHCI_USE_64_BIT_DMA) { 102 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n", 103 sdhci_readl(host, SDHCI_ADMA_ERROR), 104 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI), 105 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 106 } else { 107 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 108 sdhci_readl(host, SDHCI_ADMA_ERROR), 109 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 110 } 111 } 112 113 if (host->ops->dump_vendor_regs) 114 host->ops->dump_vendor_regs(host); 115 116 SDHCI_DUMP("============================================\n"); 117 } 118 EXPORT_SYMBOL_GPL(sdhci_dumpregs); 119 120 /*****************************************************************************\ 121 * * 122 * Low level functions * 123 * * 124 \*****************************************************************************/ 125 126 static void sdhci_do_enable_v4_mode(struct sdhci_host *host) 127 { 128 u16 ctrl2; 129 130 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 131 if (ctrl2 & SDHCI_CTRL_V4_MODE) 132 return; 133 134 ctrl2 |= SDHCI_CTRL_V4_MODE; 135 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 136 } 137 138 /* 139 * This can be called before sdhci_add_host() by Vendor's host controller 140 * driver to enable v4 mode if supported. 141 */ 142 void sdhci_enable_v4_mode(struct sdhci_host *host) 143 { 144 host->v4_mode = true; 145 sdhci_do_enable_v4_mode(host); 146 } 147 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode); 148 149 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd) 150 { 151 return cmd->data || cmd->flags & MMC_RSP_BUSY; 152 } 153 154 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) 155 { 156 u32 present; 157 158 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 159 !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc)) 160 return; 161 162 if (enable) { 163 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 164 SDHCI_CARD_PRESENT; 165 166 host->ier |= present ? SDHCI_INT_CARD_REMOVE : 167 SDHCI_INT_CARD_INSERT; 168 } else { 169 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 170 } 171 172 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 173 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 174 } 175 176 static void sdhci_enable_card_detection(struct sdhci_host *host) 177 { 178 sdhci_set_card_detection(host, true); 179 } 180 181 static void sdhci_disable_card_detection(struct sdhci_host *host) 182 { 183 sdhci_set_card_detection(host, false); 184 } 185 186 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) 187 { 188 if (host->bus_on) 189 return; 190 host->bus_on = true; 191 pm_runtime_get_noresume(mmc_dev(host->mmc)); 192 } 193 194 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) 195 { 196 if (!host->bus_on) 197 return; 198 host->bus_on = false; 199 pm_runtime_put_noidle(mmc_dev(host->mmc)); 200 } 201 202 void sdhci_reset(struct sdhci_host *host, u8 mask) 203 { 204 ktime_t timeout; 205 206 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 207 208 if (mask & SDHCI_RESET_ALL) { 209 host->clock = 0; 210 /* Reset-all turns off SD Bus Power */ 211 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 212 sdhci_runtime_pm_bus_off(host); 213 } 214 215 /* Wait max 100 ms */ 216 timeout = ktime_add_ms(ktime_get(), 100); 217 218 /* hw clears the bit when it's done */ 219 while (1) { 220 bool timedout = ktime_after(ktime_get(), timeout); 221 222 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) 223 break; 224 if (timedout) { 225 pr_err("%s: Reset 0x%x never completed.\n", 226 mmc_hostname(host->mmc), (int)mask); 227 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 228 sdhci_dumpregs(host); 229 return; 230 } 231 udelay(10); 232 } 233 } 234 EXPORT_SYMBOL_GPL(sdhci_reset); 235 236 static bool sdhci_do_reset(struct sdhci_host *host, u8 mask) 237 { 238 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 239 struct mmc_host *mmc = host->mmc; 240 241 if (!mmc->ops->get_cd(mmc)) 242 return false; 243 } 244 245 host->ops->reset(host, mask); 246 247 return true; 248 } 249 250 static void sdhci_reset_for_all(struct sdhci_host *host) 251 { 252 if (sdhci_do_reset(host, SDHCI_RESET_ALL)) { 253 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 254 if (host->ops->enable_dma) 255 host->ops->enable_dma(host); 256 } 257 /* Resetting the controller clears many */ 258 host->preset_enabled = false; 259 } 260 } 261 262 enum sdhci_reset_reason { 263 SDHCI_RESET_FOR_INIT, 264 SDHCI_RESET_FOR_REQUEST_ERROR, 265 SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY, 266 SDHCI_RESET_FOR_TUNING_ABORT, 267 SDHCI_RESET_FOR_CARD_REMOVED, 268 SDHCI_RESET_FOR_CQE_RECOVERY, 269 }; 270 271 static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason) 272 { 273 switch (reason) { 274 case SDHCI_RESET_FOR_INIT: 275 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 276 break; 277 case SDHCI_RESET_FOR_REQUEST_ERROR: 278 case SDHCI_RESET_FOR_TUNING_ABORT: 279 case SDHCI_RESET_FOR_CARD_REMOVED: 280 case SDHCI_RESET_FOR_CQE_RECOVERY: 281 sdhci_do_reset(host, SDHCI_RESET_CMD); 282 sdhci_do_reset(host, SDHCI_RESET_DATA); 283 break; 284 case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY: 285 sdhci_do_reset(host, SDHCI_RESET_DATA); 286 break; 287 } 288 } 289 290 #define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r) 291 292 static void sdhci_set_default_irqs(struct sdhci_host *host) 293 { 294 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 295 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | 296 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | 297 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | 298 SDHCI_INT_RESPONSE; 299 300 if (host->tuning_mode == SDHCI_TUNING_MODE_2 || 301 host->tuning_mode == SDHCI_TUNING_MODE_3) 302 host->ier |= SDHCI_INT_RETUNE; 303 304 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 305 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 306 } 307 308 static void sdhci_config_dma(struct sdhci_host *host) 309 { 310 u8 ctrl; 311 u16 ctrl2; 312 313 if (host->version < SDHCI_SPEC_200) 314 return; 315 316 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 317 318 /* 319 * Always adjust the DMA selection as some controllers 320 * (e.g. JMicron) can't do PIO properly when the selection 321 * is ADMA. 322 */ 323 ctrl &= ~SDHCI_CTRL_DMA_MASK; 324 if (!(host->flags & SDHCI_REQ_USE_DMA)) 325 goto out; 326 327 /* Note if DMA Select is zero then SDMA is selected */ 328 if (host->flags & SDHCI_USE_ADMA) 329 ctrl |= SDHCI_CTRL_ADMA32; 330 331 if (host->flags & SDHCI_USE_64_BIT_DMA) { 332 /* 333 * If v4 mode, all supported DMA can be 64-bit addressing if 334 * controller supports 64-bit system address, otherwise only 335 * ADMA can support 64-bit addressing. 336 */ 337 if (host->v4_mode) { 338 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 339 ctrl2 |= SDHCI_CTRL_64BIT_ADDR; 340 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 341 } else if (host->flags & SDHCI_USE_ADMA) { 342 /* 343 * Don't need to undo SDHCI_CTRL_ADMA32 in order to 344 * set SDHCI_CTRL_ADMA64. 345 */ 346 ctrl |= SDHCI_CTRL_ADMA64; 347 } 348 } 349 350 out: 351 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 352 } 353 354 static void sdhci_init(struct sdhci_host *host, int soft) 355 { 356 struct mmc_host *mmc = host->mmc; 357 unsigned long flags; 358 359 if (soft) 360 sdhci_reset_for(host, INIT); 361 else 362 sdhci_reset_for_all(host); 363 364 if (host->v4_mode) 365 sdhci_do_enable_v4_mode(host); 366 367 spin_lock_irqsave(&host->lock, flags); 368 sdhci_set_default_irqs(host); 369 spin_unlock_irqrestore(&host->lock, flags); 370 371 host->cqe_on = false; 372 373 if (soft) { 374 /* force clock reconfiguration */ 375 host->clock = 0; 376 host->reinit_uhs = true; 377 mmc->ops->set_ios(mmc, &mmc->ios); 378 } 379 } 380 381 static void sdhci_reinit(struct sdhci_host *host) 382 { 383 u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 384 385 sdhci_init(host, 0); 386 sdhci_enable_card_detection(host); 387 388 /* 389 * A change to the card detect bits indicates a change in present state, 390 * refer sdhci_set_card_detection(). A card detect interrupt might have 391 * been missed while the host controller was being reset, so trigger a 392 * rescan to check. 393 */ 394 if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT))) 395 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 396 } 397 398 static void __sdhci_led_activate(struct sdhci_host *host) 399 { 400 u8 ctrl; 401 402 if (host->quirks & SDHCI_QUIRK_NO_LED) 403 return; 404 405 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 406 ctrl |= SDHCI_CTRL_LED; 407 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 408 } 409 410 static void __sdhci_led_deactivate(struct sdhci_host *host) 411 { 412 u8 ctrl; 413 414 if (host->quirks & SDHCI_QUIRK_NO_LED) 415 return; 416 417 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 418 ctrl &= ~SDHCI_CTRL_LED; 419 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 420 } 421 422 #if IS_REACHABLE(CONFIG_LEDS_CLASS) 423 static void sdhci_led_control(struct led_classdev *led, 424 enum led_brightness brightness) 425 { 426 struct sdhci_host *host = container_of(led, struct sdhci_host, led); 427 unsigned long flags; 428 429 spin_lock_irqsave(&host->lock, flags); 430 431 if (host->runtime_suspended) 432 goto out; 433 434 if (brightness == LED_OFF) 435 __sdhci_led_deactivate(host); 436 else 437 __sdhci_led_activate(host); 438 out: 439 spin_unlock_irqrestore(&host->lock, flags); 440 } 441 442 static int sdhci_led_register(struct sdhci_host *host) 443 { 444 struct mmc_host *mmc = host->mmc; 445 446 if (host->quirks & SDHCI_QUIRK_NO_LED) 447 return 0; 448 449 snprintf(host->led_name, sizeof(host->led_name), 450 "%s::", mmc_hostname(mmc)); 451 452 host->led.name = host->led_name; 453 host->led.brightness = LED_OFF; 454 host->led.default_trigger = mmc_hostname(mmc); 455 host->led.brightness_set = sdhci_led_control; 456 457 return led_classdev_register(mmc_dev(mmc), &host->led); 458 } 459 460 static void sdhci_led_unregister(struct sdhci_host *host) 461 { 462 if (host->quirks & SDHCI_QUIRK_NO_LED) 463 return; 464 465 led_classdev_unregister(&host->led); 466 } 467 468 static inline void sdhci_led_activate(struct sdhci_host *host) 469 { 470 } 471 472 static inline void sdhci_led_deactivate(struct sdhci_host *host) 473 { 474 } 475 476 #else 477 478 static inline int sdhci_led_register(struct sdhci_host *host) 479 { 480 return 0; 481 } 482 483 static inline void sdhci_led_unregister(struct sdhci_host *host) 484 { 485 } 486 487 static inline void sdhci_led_activate(struct sdhci_host *host) 488 { 489 __sdhci_led_activate(host); 490 } 491 492 static inline void sdhci_led_deactivate(struct sdhci_host *host) 493 { 494 __sdhci_led_deactivate(host); 495 } 496 497 #endif 498 499 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq, 500 unsigned long timeout) 501 { 502 if (sdhci_data_line_cmd(mrq->cmd)) 503 mod_timer(&host->data_timer, timeout); 504 else 505 mod_timer(&host->timer, timeout); 506 } 507 508 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq) 509 { 510 if (sdhci_data_line_cmd(mrq->cmd)) 511 del_timer(&host->data_timer); 512 else 513 del_timer(&host->timer); 514 } 515 516 static inline bool sdhci_has_requests(struct sdhci_host *host) 517 { 518 return host->cmd || host->data_cmd; 519 } 520 521 /*****************************************************************************\ 522 * * 523 * Core functions * 524 * * 525 \*****************************************************************************/ 526 527 static void sdhci_read_block_pio(struct sdhci_host *host) 528 { 529 size_t blksize, len, chunk; 530 u32 scratch; 531 u8 *buf; 532 533 DBG("PIO reading\n"); 534 535 blksize = host->data->blksz; 536 chunk = 0; 537 538 while (blksize) { 539 BUG_ON(!sg_miter_next(&host->sg_miter)); 540 541 len = min(host->sg_miter.length, blksize); 542 543 blksize -= len; 544 host->sg_miter.consumed = len; 545 546 buf = host->sg_miter.addr; 547 548 while (len) { 549 if (chunk == 0) { 550 scratch = sdhci_readl(host, SDHCI_BUFFER); 551 chunk = 4; 552 } 553 554 *buf = scratch & 0xFF; 555 556 buf++; 557 scratch >>= 8; 558 chunk--; 559 len--; 560 } 561 } 562 563 sg_miter_stop(&host->sg_miter); 564 } 565 566 static void sdhci_write_block_pio(struct sdhci_host *host) 567 { 568 size_t blksize, len, chunk; 569 u32 scratch; 570 u8 *buf; 571 572 DBG("PIO writing\n"); 573 574 blksize = host->data->blksz; 575 chunk = 0; 576 scratch = 0; 577 578 while (blksize) { 579 BUG_ON(!sg_miter_next(&host->sg_miter)); 580 581 len = min(host->sg_miter.length, blksize); 582 583 blksize -= len; 584 host->sg_miter.consumed = len; 585 586 buf = host->sg_miter.addr; 587 588 while (len) { 589 scratch |= (u32)*buf << (chunk * 8); 590 591 buf++; 592 chunk++; 593 len--; 594 595 if ((chunk == 4) || ((len == 0) && (blksize == 0))) { 596 sdhci_writel(host, scratch, SDHCI_BUFFER); 597 chunk = 0; 598 scratch = 0; 599 } 600 } 601 } 602 603 sg_miter_stop(&host->sg_miter); 604 } 605 606 static void sdhci_transfer_pio(struct sdhci_host *host) 607 { 608 u32 mask; 609 610 if (host->blocks == 0) 611 return; 612 613 if (host->data->flags & MMC_DATA_READ) 614 mask = SDHCI_DATA_AVAILABLE; 615 else 616 mask = SDHCI_SPACE_AVAILABLE; 617 618 /* 619 * Some controllers (JMicron JMB38x) mess up the buffer bits 620 * for transfers < 4 bytes. As long as it is just one block, 621 * we can ignore the bits. 622 */ 623 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && 624 (host->data->blocks == 1)) 625 mask = ~0; 626 627 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 628 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) 629 udelay(100); 630 631 if (host->data->flags & MMC_DATA_READ) 632 sdhci_read_block_pio(host); 633 else 634 sdhci_write_block_pio(host); 635 636 host->blocks--; 637 if (host->blocks == 0) 638 break; 639 } 640 641 DBG("PIO transfer complete.\n"); 642 } 643 644 static int sdhci_pre_dma_transfer(struct sdhci_host *host, 645 struct mmc_data *data, int cookie) 646 { 647 int sg_count; 648 649 /* 650 * If the data buffers are already mapped, return the previous 651 * dma_map_sg() result. 652 */ 653 if (data->host_cookie == COOKIE_PRE_MAPPED) 654 return data->sg_count; 655 656 /* Bounce write requests to the bounce buffer */ 657 if (host->bounce_buffer) { 658 unsigned int length = data->blksz * data->blocks; 659 660 if (length > host->bounce_buffer_size) { 661 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n", 662 mmc_hostname(host->mmc), length, 663 host->bounce_buffer_size); 664 return -EIO; 665 } 666 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { 667 /* Copy the data to the bounce buffer */ 668 if (host->ops->copy_to_bounce_buffer) { 669 host->ops->copy_to_bounce_buffer(host, 670 data, length); 671 } else { 672 sg_copy_to_buffer(data->sg, data->sg_len, 673 host->bounce_buffer, length); 674 } 675 } 676 /* Switch ownership to the DMA */ 677 dma_sync_single_for_device(mmc_dev(host->mmc), 678 host->bounce_addr, 679 host->bounce_buffer_size, 680 mmc_get_dma_dir(data)); 681 /* Just a dummy value */ 682 sg_count = 1; 683 } else { 684 /* Just access the data directly from memory */ 685 sg_count = dma_map_sg(mmc_dev(host->mmc), 686 data->sg, data->sg_len, 687 mmc_get_dma_dir(data)); 688 } 689 690 if (sg_count == 0) 691 return -ENOSPC; 692 693 data->sg_count = sg_count; 694 data->host_cookie = cookie; 695 696 return sg_count; 697 } 698 699 static char *sdhci_kmap_atomic(struct scatterlist *sg) 700 { 701 return kmap_local_page(sg_page(sg)) + sg->offset; 702 } 703 704 static void sdhci_kunmap_atomic(void *buffer) 705 { 706 kunmap_local(buffer); 707 } 708 709 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, 710 dma_addr_t addr, int len, unsigned int cmd) 711 { 712 struct sdhci_adma2_64_desc *dma_desc = *desc; 713 714 /* 32-bit and 64-bit descriptors have these members in same position */ 715 dma_desc->cmd = cpu_to_le16(cmd); 716 dma_desc->len = cpu_to_le16(len); 717 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr)); 718 719 if (host->flags & SDHCI_USE_64_BIT_DMA) 720 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr)); 721 722 *desc += host->desc_sz; 723 } 724 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc); 725 726 static inline void __sdhci_adma_write_desc(struct sdhci_host *host, 727 void **desc, dma_addr_t addr, 728 int len, unsigned int cmd) 729 { 730 if (host->ops->adma_write_desc) 731 host->ops->adma_write_desc(host, desc, addr, len, cmd); 732 else 733 sdhci_adma_write_desc(host, desc, addr, len, cmd); 734 } 735 736 static void sdhci_adma_mark_end(void *desc) 737 { 738 struct sdhci_adma2_64_desc *dma_desc = desc; 739 740 /* 32-bit and 64-bit descriptors have 'cmd' in same position */ 741 dma_desc->cmd |= cpu_to_le16(ADMA2_END); 742 } 743 744 static void sdhci_adma_table_pre(struct sdhci_host *host, 745 struct mmc_data *data, int sg_count) 746 { 747 struct scatterlist *sg; 748 dma_addr_t addr, align_addr; 749 void *desc, *align; 750 char *buffer; 751 int len, offset, i; 752 753 /* 754 * The spec does not specify endianness of descriptor table. 755 * We currently guess that it is LE. 756 */ 757 758 host->sg_count = sg_count; 759 760 desc = host->adma_table; 761 align = host->align_buffer; 762 763 align_addr = host->align_addr; 764 765 for_each_sg(data->sg, sg, host->sg_count, i) { 766 addr = sg_dma_address(sg); 767 len = sg_dma_len(sg); 768 769 /* 770 * The SDHCI specification states that ADMA addresses must 771 * be 32-bit aligned. If they aren't, then we use a bounce 772 * buffer for the (up to three) bytes that screw up the 773 * alignment. 774 */ 775 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) & 776 SDHCI_ADMA2_MASK; 777 if (offset) { 778 if (data->flags & MMC_DATA_WRITE) { 779 buffer = sdhci_kmap_atomic(sg); 780 memcpy(align, buffer, offset); 781 sdhci_kunmap_atomic(buffer); 782 } 783 784 /* tran, valid */ 785 __sdhci_adma_write_desc(host, &desc, align_addr, 786 offset, ADMA2_TRAN_VALID); 787 788 BUG_ON(offset > 65536); 789 790 align += SDHCI_ADMA2_ALIGN; 791 align_addr += SDHCI_ADMA2_ALIGN; 792 793 addr += offset; 794 len -= offset; 795 } 796 797 /* 798 * The block layer forces a minimum segment size of PAGE_SIZE, 799 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write 800 * multiple descriptors, noting that the ADMA table is sized 801 * for 4KiB chunks anyway, so it will be big enough. 802 */ 803 while (len > host->max_adma) { 804 int n = 32 * 1024; /* 32KiB*/ 805 806 __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID); 807 addr += n; 808 len -= n; 809 } 810 811 /* tran, valid */ 812 if (len) 813 __sdhci_adma_write_desc(host, &desc, addr, len, 814 ADMA2_TRAN_VALID); 815 816 /* 817 * If this triggers then we have a calculation bug 818 * somewhere. :/ 819 */ 820 WARN_ON((desc - host->adma_table) >= host->adma_table_sz); 821 } 822 823 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { 824 /* Mark the last descriptor as the terminating descriptor */ 825 if (desc != host->adma_table) { 826 desc -= host->desc_sz; 827 sdhci_adma_mark_end(desc); 828 } 829 } else { 830 /* Add a terminating entry - nop, end, valid */ 831 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID); 832 } 833 } 834 835 static void sdhci_adma_table_post(struct sdhci_host *host, 836 struct mmc_data *data) 837 { 838 struct scatterlist *sg; 839 int i, size; 840 void *align; 841 char *buffer; 842 843 if (data->flags & MMC_DATA_READ) { 844 bool has_unaligned = false; 845 846 /* Do a quick scan of the SG list for any unaligned mappings */ 847 for_each_sg(data->sg, sg, host->sg_count, i) 848 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 849 has_unaligned = true; 850 break; 851 } 852 853 if (has_unaligned) { 854 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, 855 data->sg_len, DMA_FROM_DEVICE); 856 857 align = host->align_buffer; 858 859 for_each_sg(data->sg, sg, host->sg_count, i) { 860 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 861 size = SDHCI_ADMA2_ALIGN - 862 (sg_dma_address(sg) & SDHCI_ADMA2_MASK); 863 864 buffer = sdhci_kmap_atomic(sg); 865 memcpy(buffer, align, size); 866 sdhci_kunmap_atomic(buffer); 867 868 align += SDHCI_ADMA2_ALIGN; 869 } 870 } 871 } 872 } 873 } 874 875 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr) 876 { 877 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS); 878 if (host->flags & SDHCI_USE_64_BIT_DMA) 879 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI); 880 } 881 882 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host) 883 { 884 if (host->bounce_buffer) 885 return host->bounce_addr; 886 else 887 return sg_dma_address(host->data->sg); 888 } 889 890 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr) 891 { 892 if (host->v4_mode) 893 sdhci_set_adma_addr(host, addr); 894 else 895 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS); 896 } 897 898 static unsigned int sdhci_target_timeout(struct sdhci_host *host, 899 struct mmc_command *cmd, 900 struct mmc_data *data) 901 { 902 unsigned int target_timeout; 903 904 /* timeout in us */ 905 if (!data) { 906 target_timeout = cmd->busy_timeout * 1000; 907 } else { 908 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000); 909 if (host->clock && data->timeout_clks) { 910 unsigned long long val; 911 912 /* 913 * data->timeout_clks is in units of clock cycles. 914 * host->clock is in Hz. target_timeout is in us. 915 * Hence, us = 1000000 * cycles / Hz. Round up. 916 */ 917 val = 1000000ULL * data->timeout_clks; 918 if (do_div(val, host->clock)) 919 target_timeout++; 920 target_timeout += val; 921 } 922 } 923 924 return target_timeout; 925 } 926 927 static void sdhci_calc_sw_timeout(struct sdhci_host *host, 928 struct mmc_command *cmd) 929 { 930 struct mmc_data *data = cmd->data; 931 struct mmc_host *mmc = host->mmc; 932 struct mmc_ios *ios = &mmc->ios; 933 unsigned char bus_width = 1 << ios->bus_width; 934 unsigned int blksz; 935 unsigned int freq; 936 u64 target_timeout; 937 u64 transfer_time; 938 939 target_timeout = sdhci_target_timeout(host, cmd, data); 940 target_timeout *= NSEC_PER_USEC; 941 942 if (data) { 943 blksz = data->blksz; 944 freq = mmc->actual_clock ? : host->clock; 945 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width); 946 do_div(transfer_time, freq); 947 /* multiply by '2' to account for any unknowns */ 948 transfer_time = transfer_time * 2; 949 /* calculate timeout for the entire data */ 950 host->data_timeout = data->blocks * target_timeout + 951 transfer_time; 952 } else { 953 host->data_timeout = target_timeout; 954 } 955 956 if (host->data_timeout) 957 host->data_timeout += MMC_CMD_TRANSFER_TIME; 958 } 959 960 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd, 961 bool *too_big) 962 { 963 u8 count; 964 struct mmc_data *data; 965 unsigned target_timeout, current_timeout; 966 967 *too_big = false; 968 969 /* 970 * If the host controller provides us with an incorrect timeout 971 * value, just skip the check and use the maximum. The hardware may take 972 * longer to time out, but that's much better than having a too-short 973 * timeout value. 974 */ 975 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) 976 return host->max_timeout_count; 977 978 /* Unspecified command, assume max */ 979 if (cmd == NULL) 980 return host->max_timeout_count; 981 982 data = cmd->data; 983 /* Unspecified timeout, assume max */ 984 if (!data && !cmd->busy_timeout) 985 return host->max_timeout_count; 986 987 /* timeout in us */ 988 target_timeout = sdhci_target_timeout(host, cmd, data); 989 990 /* 991 * Figure out needed cycles. 992 * We do this in steps in order to fit inside a 32 bit int. 993 * The first step is the minimum timeout, which will have a 994 * minimum resolution of 6 bits: 995 * (1) 2^13*1000 > 2^22, 996 * (2) host->timeout_clk < 2^16 997 * => 998 * (1) / (2) > 2^6 999 */ 1000 count = 0; 1001 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 1002 while (current_timeout < target_timeout) { 1003 count++; 1004 current_timeout <<= 1; 1005 if (count > host->max_timeout_count) { 1006 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT)) 1007 DBG("Too large timeout 0x%x requested for CMD%d!\n", 1008 count, cmd->opcode); 1009 count = host->max_timeout_count; 1010 *too_big = true; 1011 break; 1012 } 1013 } 1014 1015 return count; 1016 } 1017 1018 static void sdhci_set_transfer_irqs(struct sdhci_host *host) 1019 { 1020 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; 1021 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; 1022 1023 if (host->flags & SDHCI_REQ_USE_DMA) 1024 host->ier = (host->ier & ~pio_irqs) | dma_irqs; 1025 else 1026 host->ier = (host->ier & ~dma_irqs) | pio_irqs; 1027 1028 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12)) 1029 host->ier |= SDHCI_INT_AUTO_CMD_ERR; 1030 else 1031 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR; 1032 1033 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1034 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1035 } 1036 1037 void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) 1038 { 1039 if (enable) 1040 host->ier |= SDHCI_INT_DATA_TIMEOUT; 1041 else 1042 host->ier &= ~SDHCI_INT_DATA_TIMEOUT; 1043 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1044 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1045 } 1046 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); 1047 1048 void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1049 { 1050 bool too_big = false; 1051 u8 count = sdhci_calc_timeout(host, cmd, &too_big); 1052 1053 if (too_big && 1054 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { 1055 sdhci_calc_sw_timeout(host, cmd); 1056 sdhci_set_data_timeout_irq(host, false); 1057 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { 1058 sdhci_set_data_timeout_irq(host, true); 1059 } 1060 1061 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); 1062 } 1063 EXPORT_SYMBOL_GPL(__sdhci_set_timeout); 1064 1065 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1066 { 1067 if (host->ops->set_timeout) 1068 host->ops->set_timeout(host, cmd); 1069 else 1070 __sdhci_set_timeout(host, cmd); 1071 } 1072 1073 static void sdhci_initialize_data(struct sdhci_host *host, 1074 struct mmc_data *data) 1075 { 1076 WARN_ON(host->data); 1077 1078 /* Sanity checks */ 1079 BUG_ON(data->blksz * data->blocks > 524288); 1080 BUG_ON(data->blksz > host->mmc->max_blk_size); 1081 BUG_ON(data->blocks > 65535); 1082 1083 host->data = data; 1084 host->data_early = 0; 1085 host->data->bytes_xfered = 0; 1086 } 1087 1088 static inline void sdhci_set_block_info(struct sdhci_host *host, 1089 struct mmc_data *data) 1090 { 1091 /* Set the DMA boundary value and block size */ 1092 sdhci_writew(host, 1093 SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), 1094 SDHCI_BLOCK_SIZE); 1095 /* 1096 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count 1097 * can be supported, in that case 16-bit block count register must be 0. 1098 */ 1099 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1100 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { 1101 if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) 1102 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); 1103 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); 1104 } else { 1105 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); 1106 } 1107 } 1108 1109 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) 1110 { 1111 struct mmc_data *data = cmd->data; 1112 1113 sdhci_initialize_data(host, data); 1114 1115 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 1116 struct scatterlist *sg; 1117 unsigned int length_mask, offset_mask; 1118 int i; 1119 1120 host->flags |= SDHCI_REQ_USE_DMA; 1121 1122 /* 1123 * FIXME: This doesn't account for merging when mapping the 1124 * scatterlist. 1125 * 1126 * The assumption here being that alignment and lengths are 1127 * the same after DMA mapping to device address space. 1128 */ 1129 length_mask = 0; 1130 offset_mask = 0; 1131 if (host->flags & SDHCI_USE_ADMA) { 1132 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) { 1133 length_mask = 3; 1134 /* 1135 * As we use up to 3 byte chunks to work 1136 * around alignment problems, we need to 1137 * check the offset as well. 1138 */ 1139 offset_mask = 3; 1140 } 1141 } else { 1142 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) 1143 length_mask = 3; 1144 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) 1145 offset_mask = 3; 1146 } 1147 1148 if (unlikely(length_mask | offset_mask)) { 1149 for_each_sg(data->sg, sg, data->sg_len, i) { 1150 if (sg->length & length_mask) { 1151 DBG("Reverting to PIO because of transfer size (%d)\n", 1152 sg->length); 1153 host->flags &= ~SDHCI_REQ_USE_DMA; 1154 break; 1155 } 1156 if (sg->offset & offset_mask) { 1157 DBG("Reverting to PIO because of bad alignment\n"); 1158 host->flags &= ~SDHCI_REQ_USE_DMA; 1159 break; 1160 } 1161 } 1162 } 1163 } 1164 1165 if (host->flags & SDHCI_REQ_USE_DMA) { 1166 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1167 1168 if (sg_cnt <= 0) { 1169 /* 1170 * This only happens when someone fed 1171 * us an invalid request. 1172 */ 1173 WARN_ON(1); 1174 host->flags &= ~SDHCI_REQ_USE_DMA; 1175 } else if (host->flags & SDHCI_USE_ADMA) { 1176 sdhci_adma_table_pre(host, data, sg_cnt); 1177 sdhci_set_adma_addr(host, host->adma_addr); 1178 } else { 1179 WARN_ON(sg_cnt != 1); 1180 sdhci_set_sdma_addr(host, sdhci_sdma_address(host)); 1181 } 1182 } 1183 1184 sdhci_config_dma(host); 1185 1186 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 1187 int flags; 1188 1189 flags = SG_MITER_ATOMIC; 1190 if (host->data->flags & MMC_DATA_READ) 1191 flags |= SG_MITER_TO_SG; 1192 else 1193 flags |= SG_MITER_FROM_SG; 1194 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1195 host->blocks = data->blocks; 1196 } 1197 1198 sdhci_set_transfer_irqs(host); 1199 1200 sdhci_set_block_info(host, data); 1201 } 1202 1203 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) 1204 1205 static int sdhci_external_dma_init(struct sdhci_host *host) 1206 { 1207 int ret = 0; 1208 struct mmc_host *mmc = host->mmc; 1209 1210 host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx"); 1211 if (IS_ERR(host->tx_chan)) { 1212 ret = PTR_ERR(host->tx_chan); 1213 if (ret != -EPROBE_DEFER) 1214 pr_warn("Failed to request TX DMA channel.\n"); 1215 host->tx_chan = NULL; 1216 return ret; 1217 } 1218 1219 host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx"); 1220 if (IS_ERR(host->rx_chan)) { 1221 if (host->tx_chan) { 1222 dma_release_channel(host->tx_chan); 1223 host->tx_chan = NULL; 1224 } 1225 1226 ret = PTR_ERR(host->rx_chan); 1227 if (ret != -EPROBE_DEFER) 1228 pr_warn("Failed to request RX DMA channel.\n"); 1229 host->rx_chan = NULL; 1230 } 1231 1232 return ret; 1233 } 1234 1235 static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1236 struct mmc_data *data) 1237 { 1238 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; 1239 } 1240 1241 static int sdhci_external_dma_setup(struct sdhci_host *host, 1242 struct mmc_command *cmd) 1243 { 1244 int ret, i; 1245 enum dma_transfer_direction dir; 1246 struct dma_async_tx_descriptor *desc; 1247 struct mmc_data *data = cmd->data; 1248 struct dma_chan *chan; 1249 struct dma_slave_config cfg; 1250 dma_cookie_t cookie; 1251 int sg_cnt; 1252 1253 if (!host->mapbase) 1254 return -EINVAL; 1255 1256 memset(&cfg, 0, sizeof(cfg)); 1257 cfg.src_addr = host->mapbase + SDHCI_BUFFER; 1258 cfg.dst_addr = host->mapbase + SDHCI_BUFFER; 1259 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1260 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1261 cfg.src_maxburst = data->blksz / 4; 1262 cfg.dst_maxburst = data->blksz / 4; 1263 1264 /* Sanity check: all the SG entries must be aligned by block size. */ 1265 for (i = 0; i < data->sg_len; i++) { 1266 if ((data->sg + i)->length % data->blksz) 1267 return -EINVAL; 1268 } 1269 1270 chan = sdhci_external_dma_channel(host, data); 1271 1272 ret = dmaengine_slave_config(chan, &cfg); 1273 if (ret) 1274 return ret; 1275 1276 sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1277 if (sg_cnt <= 0) 1278 return -EINVAL; 1279 1280 dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; 1281 desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir, 1282 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1283 if (!desc) 1284 return -EINVAL; 1285 1286 desc->callback = NULL; 1287 desc->callback_param = NULL; 1288 1289 cookie = dmaengine_submit(desc); 1290 if (dma_submit_error(cookie)) 1291 ret = cookie; 1292 1293 return ret; 1294 } 1295 1296 static void sdhci_external_dma_release(struct sdhci_host *host) 1297 { 1298 if (host->tx_chan) { 1299 dma_release_channel(host->tx_chan); 1300 host->tx_chan = NULL; 1301 } 1302 1303 if (host->rx_chan) { 1304 dma_release_channel(host->rx_chan); 1305 host->rx_chan = NULL; 1306 } 1307 1308 sdhci_switch_external_dma(host, false); 1309 } 1310 1311 static void __sdhci_external_dma_prepare_data(struct sdhci_host *host, 1312 struct mmc_command *cmd) 1313 { 1314 struct mmc_data *data = cmd->data; 1315 1316 sdhci_initialize_data(host, data); 1317 1318 host->flags |= SDHCI_REQ_USE_DMA; 1319 sdhci_set_transfer_irqs(host); 1320 1321 sdhci_set_block_info(host, data); 1322 } 1323 1324 static void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1325 struct mmc_command *cmd) 1326 { 1327 if (!sdhci_external_dma_setup(host, cmd)) { 1328 __sdhci_external_dma_prepare_data(host, cmd); 1329 } else { 1330 sdhci_external_dma_release(host); 1331 pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n", 1332 mmc_hostname(host->mmc)); 1333 sdhci_prepare_data(host, cmd); 1334 } 1335 } 1336 1337 static void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1338 struct mmc_command *cmd) 1339 { 1340 struct dma_chan *chan; 1341 1342 if (!cmd->data) 1343 return; 1344 1345 chan = sdhci_external_dma_channel(host, cmd->data); 1346 if (chan) 1347 dma_async_issue_pending(chan); 1348 } 1349 1350 #else 1351 1352 static inline int sdhci_external_dma_init(struct sdhci_host *host) 1353 { 1354 return -EOPNOTSUPP; 1355 } 1356 1357 static inline void sdhci_external_dma_release(struct sdhci_host *host) 1358 { 1359 } 1360 1361 static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1362 struct mmc_command *cmd) 1363 { 1364 /* This should never happen */ 1365 WARN_ON_ONCE(1); 1366 } 1367 1368 static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1369 struct mmc_command *cmd) 1370 { 1371 } 1372 1373 static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1374 struct mmc_data *data) 1375 { 1376 return NULL; 1377 } 1378 1379 #endif 1380 1381 void sdhci_switch_external_dma(struct sdhci_host *host, bool en) 1382 { 1383 host->use_external_dma = en; 1384 } 1385 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma); 1386 1387 static inline bool sdhci_auto_cmd12(struct sdhci_host *host, 1388 struct mmc_request *mrq) 1389 { 1390 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) && 1391 !mrq->cap_cmd_during_tfr; 1392 } 1393 1394 static inline bool sdhci_auto_cmd23(struct sdhci_host *host, 1395 struct mmc_request *mrq) 1396 { 1397 return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23); 1398 } 1399 1400 static inline bool sdhci_manual_cmd23(struct sdhci_host *host, 1401 struct mmc_request *mrq) 1402 { 1403 return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23); 1404 } 1405 1406 static inline void sdhci_auto_cmd_select(struct sdhci_host *host, 1407 struct mmc_command *cmd, 1408 u16 *mode) 1409 { 1410 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) && 1411 (cmd->opcode != SD_IO_RW_EXTENDED); 1412 bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq); 1413 u16 ctrl2; 1414 1415 /* 1416 * In case of Version 4.10 or later, use of 'Auto CMD Auto 1417 * Select' is recommended rather than use of 'Auto CMD12 1418 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode 1419 * here because some controllers (e.g sdhci-of-dwmshc) expect it. 1420 */ 1421 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1422 (use_cmd12 || use_cmd23)) { 1423 *mode |= SDHCI_TRNS_AUTO_SEL; 1424 1425 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1426 if (use_cmd23) 1427 ctrl2 |= SDHCI_CMD23_ENABLE; 1428 else 1429 ctrl2 &= ~SDHCI_CMD23_ENABLE; 1430 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 1431 1432 return; 1433 } 1434 1435 /* 1436 * If we are sending CMD23, CMD12 never gets sent 1437 * on successful completion (so no Auto-CMD12). 1438 */ 1439 if (use_cmd12) 1440 *mode |= SDHCI_TRNS_AUTO_CMD12; 1441 else if (use_cmd23) 1442 *mode |= SDHCI_TRNS_AUTO_CMD23; 1443 } 1444 1445 static void sdhci_set_transfer_mode(struct sdhci_host *host, 1446 struct mmc_command *cmd) 1447 { 1448 u16 mode = 0; 1449 struct mmc_data *data = cmd->data; 1450 1451 if (data == NULL) { 1452 if (host->quirks2 & 1453 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) { 1454 /* must not clear SDHCI_TRANSFER_MODE when tuning */ 1455 if (!mmc_op_tuning(cmd->opcode)) 1456 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE); 1457 } else { 1458 /* clear Auto CMD settings for no data CMDs */ 1459 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); 1460 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 | 1461 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE); 1462 } 1463 return; 1464 } 1465 1466 WARN_ON(!host->data); 1467 1468 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE)) 1469 mode = SDHCI_TRNS_BLK_CNT_EN; 1470 1471 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) { 1472 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI; 1473 sdhci_auto_cmd_select(host, cmd, &mode); 1474 if (sdhci_auto_cmd23(host, cmd->mrq)) 1475 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2); 1476 } 1477 1478 if (data->flags & MMC_DATA_READ) 1479 mode |= SDHCI_TRNS_READ; 1480 if (host->flags & SDHCI_REQ_USE_DMA) 1481 mode |= SDHCI_TRNS_DMA; 1482 1483 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); 1484 } 1485 1486 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) 1487 { 1488 return (!(host->flags & SDHCI_DEVICE_DEAD) && 1489 ((mrq->cmd && mrq->cmd->error) || 1490 (mrq->sbc && mrq->sbc->error) || 1491 (mrq->data && mrq->data->stop && mrq->data->stop->error) || 1492 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); 1493 } 1494 1495 static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq) 1496 { 1497 int i; 1498 1499 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1500 if (host->mrqs_done[i] == mrq) { 1501 WARN_ON(1); 1502 return; 1503 } 1504 } 1505 1506 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1507 if (!host->mrqs_done[i]) { 1508 host->mrqs_done[i] = mrq; 1509 break; 1510 } 1511 } 1512 1513 WARN_ON(i >= SDHCI_MAX_MRQS); 1514 } 1515 1516 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1517 { 1518 if (host->cmd && host->cmd->mrq == mrq) 1519 host->cmd = NULL; 1520 1521 if (host->data_cmd && host->data_cmd->mrq == mrq) 1522 host->data_cmd = NULL; 1523 1524 if (host->deferred_cmd && host->deferred_cmd->mrq == mrq) 1525 host->deferred_cmd = NULL; 1526 1527 if (host->data && host->data->mrq == mrq) 1528 host->data = NULL; 1529 1530 if (sdhci_needs_reset(host, mrq)) 1531 host->pending_reset = true; 1532 1533 sdhci_set_mrq_done(host, mrq); 1534 1535 sdhci_del_timer(host, mrq); 1536 1537 if (!sdhci_has_requests(host)) 1538 sdhci_led_deactivate(host); 1539 } 1540 1541 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1542 { 1543 __sdhci_finish_mrq(host, mrq); 1544 1545 queue_work(host->complete_wq, &host->complete_work); 1546 } 1547 1548 static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout) 1549 { 1550 struct mmc_command *data_cmd = host->data_cmd; 1551 struct mmc_data *data = host->data; 1552 1553 host->data = NULL; 1554 host->data_cmd = NULL; 1555 1556 /* 1557 * The controller needs a reset of internal state machines upon error 1558 * conditions. 1559 */ 1560 if (data->error) { 1561 if (!host->cmd || host->cmd == data_cmd) 1562 sdhci_reset_for(host, REQUEST_ERROR); 1563 else 1564 sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY); 1565 } 1566 1567 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) == 1568 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) 1569 sdhci_adma_table_post(host, data); 1570 1571 /* 1572 * The specification states that the block count register must 1573 * be updated, but it does not specify at what point in the 1574 * data flow. That makes the register entirely useless to read 1575 * back so we have to assume that nothing made it to the card 1576 * in the event of an error. 1577 */ 1578 if (data->error) 1579 data->bytes_xfered = 0; 1580 else 1581 data->bytes_xfered = data->blksz * data->blocks; 1582 1583 /* 1584 * Need to send CMD12 if - 1585 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23) 1586 * b) error in multiblock transfer 1587 */ 1588 if (data->stop && 1589 ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) || 1590 data->error)) { 1591 /* 1592 * 'cap_cmd_during_tfr' request must not use the command line 1593 * after mmc_command_done() has been called. It is upper layer's 1594 * responsibility to send the stop command if required. 1595 */ 1596 if (data->mrq->cap_cmd_during_tfr) { 1597 __sdhci_finish_mrq(host, data->mrq); 1598 } else { 1599 /* Avoid triggering warning in sdhci_send_command() */ 1600 host->cmd = NULL; 1601 if (!sdhci_send_command(host, data->stop)) { 1602 if (sw_data_timeout) { 1603 /* 1604 * This is anyway a sw data timeout, so 1605 * give up now. 1606 */ 1607 data->stop->error = -EIO; 1608 __sdhci_finish_mrq(host, data->mrq); 1609 } else { 1610 WARN_ON(host->deferred_cmd); 1611 host->deferred_cmd = data->stop; 1612 } 1613 } 1614 } 1615 } else { 1616 __sdhci_finish_mrq(host, data->mrq); 1617 } 1618 } 1619 1620 static void sdhci_finish_data(struct sdhci_host *host) 1621 { 1622 __sdhci_finish_data(host, false); 1623 } 1624 1625 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) 1626 { 1627 int flags; 1628 u32 mask; 1629 unsigned long timeout; 1630 1631 WARN_ON(host->cmd); 1632 1633 /* Initially, a command has no error */ 1634 cmd->error = 0; 1635 1636 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && 1637 cmd->opcode == MMC_STOP_TRANSMISSION) 1638 cmd->flags |= MMC_RSP_BUSY; 1639 1640 mask = SDHCI_CMD_INHIBIT; 1641 if (sdhci_data_line_cmd(cmd)) 1642 mask |= SDHCI_DATA_INHIBIT; 1643 1644 /* We shouldn't wait for data inihibit for stop commands, even 1645 though they might use busy signaling */ 1646 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop)) 1647 mask &= ~SDHCI_DATA_INHIBIT; 1648 1649 if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) 1650 return false; 1651 1652 host->cmd = cmd; 1653 host->data_timeout = 0; 1654 if (sdhci_data_line_cmd(cmd)) { 1655 WARN_ON(host->data_cmd); 1656 host->data_cmd = cmd; 1657 sdhci_set_timeout(host, cmd); 1658 } 1659 1660 if (cmd->data) { 1661 if (host->use_external_dma) 1662 sdhci_external_dma_prepare_data(host, cmd); 1663 else 1664 sdhci_prepare_data(host, cmd); 1665 } 1666 1667 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); 1668 1669 sdhci_set_transfer_mode(host, cmd); 1670 1671 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 1672 WARN_ONCE(1, "Unsupported response type!\n"); 1673 /* 1674 * This does not happen in practice because 136-bit response 1675 * commands never have busy waiting, so rather than complicate 1676 * the error path, just remove busy waiting and continue. 1677 */ 1678 cmd->flags &= ~MMC_RSP_BUSY; 1679 } 1680 1681 if (!(cmd->flags & MMC_RSP_PRESENT)) 1682 flags = SDHCI_CMD_RESP_NONE; 1683 else if (cmd->flags & MMC_RSP_136) 1684 flags = SDHCI_CMD_RESP_LONG; 1685 else if (cmd->flags & MMC_RSP_BUSY) 1686 flags = SDHCI_CMD_RESP_SHORT_BUSY; 1687 else 1688 flags = SDHCI_CMD_RESP_SHORT; 1689 1690 if (cmd->flags & MMC_RSP_CRC) 1691 flags |= SDHCI_CMD_CRC; 1692 if (cmd->flags & MMC_RSP_OPCODE) 1693 flags |= SDHCI_CMD_INDEX; 1694 1695 /* CMD19 is special in that the Data Present Select should be set */ 1696 if (cmd->data || mmc_op_tuning(cmd->opcode)) 1697 flags |= SDHCI_CMD_DATA; 1698 1699 timeout = jiffies; 1700 if (host->data_timeout) 1701 timeout += nsecs_to_jiffies(host->data_timeout); 1702 else if (!cmd->data && cmd->busy_timeout > 9000) 1703 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ; 1704 else 1705 timeout += 10 * HZ; 1706 sdhci_mod_timer(host, cmd->mrq, timeout); 1707 1708 if (host->use_external_dma) 1709 sdhci_external_dma_pre_transfer(host, cmd); 1710 1711 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); 1712 1713 return true; 1714 } 1715 1716 static bool sdhci_present_error(struct sdhci_host *host, 1717 struct mmc_command *cmd, bool present) 1718 { 1719 if (!present || host->flags & SDHCI_DEVICE_DEAD) { 1720 cmd->error = -ENOMEDIUM; 1721 return true; 1722 } 1723 1724 return false; 1725 } 1726 1727 static bool sdhci_send_command_retry(struct sdhci_host *host, 1728 struct mmc_command *cmd, 1729 unsigned long flags) 1730 __releases(host->lock) 1731 __acquires(host->lock) 1732 { 1733 struct mmc_command *deferred_cmd = host->deferred_cmd; 1734 int timeout = 10; /* Approx. 10 ms */ 1735 bool present; 1736 1737 while (!sdhci_send_command(host, cmd)) { 1738 if (!timeout--) { 1739 pr_err("%s: Controller never released inhibit bit(s).\n", 1740 mmc_hostname(host->mmc)); 1741 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 1742 sdhci_dumpregs(host); 1743 cmd->error = -EIO; 1744 return false; 1745 } 1746 1747 spin_unlock_irqrestore(&host->lock, flags); 1748 1749 usleep_range(1000, 1250); 1750 1751 present = host->mmc->ops->get_cd(host->mmc); 1752 1753 spin_lock_irqsave(&host->lock, flags); 1754 1755 /* A deferred command might disappear, handle that */ 1756 if (cmd == deferred_cmd && cmd != host->deferred_cmd) 1757 return true; 1758 1759 if (sdhci_present_error(host, cmd, present)) 1760 return false; 1761 } 1762 1763 if (cmd == host->deferred_cmd) 1764 host->deferred_cmd = NULL; 1765 1766 return true; 1767 } 1768 1769 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd) 1770 { 1771 int i, reg; 1772 1773 for (i = 0; i < 4; i++) { 1774 reg = SDHCI_RESPONSE + (3 - i) * 4; 1775 cmd->resp[i] = sdhci_readl(host, reg); 1776 } 1777 1778 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC) 1779 return; 1780 1781 /* CRC is stripped so we need to do some shifting */ 1782 for (i = 0; i < 4; i++) { 1783 cmd->resp[i] <<= 8; 1784 if (i != 3) 1785 cmd->resp[i] |= cmd->resp[i + 1] >> 24; 1786 } 1787 } 1788 1789 static void sdhci_finish_command(struct sdhci_host *host) 1790 { 1791 struct mmc_command *cmd = host->cmd; 1792 1793 host->cmd = NULL; 1794 1795 if (cmd->flags & MMC_RSP_PRESENT) { 1796 if (cmd->flags & MMC_RSP_136) { 1797 sdhci_read_rsp_136(host, cmd); 1798 } else { 1799 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); 1800 } 1801 } 1802 1803 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd) 1804 mmc_command_done(host->mmc, cmd->mrq); 1805 1806 /* 1807 * The host can send and interrupt when the busy state has 1808 * ended, allowing us to wait without wasting CPU cycles. 1809 * The busy signal uses DAT0 so this is similar to waiting 1810 * for data to complete. 1811 * 1812 * Note: The 1.0 specification is a bit ambiguous about this 1813 * feature so there might be some problems with older 1814 * controllers. 1815 */ 1816 if (cmd->flags & MMC_RSP_BUSY) { 1817 if (cmd->data) { 1818 DBG("Cannot wait for busy signal when also doing a data transfer"); 1819 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) && 1820 cmd == host->data_cmd) { 1821 /* Command complete before busy is ended */ 1822 return; 1823 } 1824 } 1825 1826 /* Finished CMD23, now send actual command. */ 1827 if (cmd == cmd->mrq->sbc) { 1828 if (!sdhci_send_command(host, cmd->mrq->cmd)) { 1829 WARN_ON(host->deferred_cmd); 1830 host->deferred_cmd = cmd->mrq->cmd; 1831 } 1832 } else { 1833 1834 /* Processed actual command. */ 1835 if (host->data && host->data_early) 1836 sdhci_finish_data(host); 1837 1838 if (!cmd->data) 1839 __sdhci_finish_mrq(host, cmd->mrq); 1840 } 1841 } 1842 1843 static u16 sdhci_get_preset_value(struct sdhci_host *host) 1844 { 1845 u16 preset = 0; 1846 1847 switch (host->timing) { 1848 case MMC_TIMING_MMC_HS: 1849 case MMC_TIMING_SD_HS: 1850 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED); 1851 break; 1852 case MMC_TIMING_UHS_SDR12: 1853 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1854 break; 1855 case MMC_TIMING_UHS_SDR25: 1856 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); 1857 break; 1858 case MMC_TIMING_UHS_SDR50: 1859 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); 1860 break; 1861 case MMC_TIMING_UHS_SDR104: 1862 case MMC_TIMING_MMC_HS200: 1863 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); 1864 break; 1865 case MMC_TIMING_UHS_DDR50: 1866 case MMC_TIMING_MMC_DDR52: 1867 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); 1868 break; 1869 case MMC_TIMING_MMC_HS400: 1870 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400); 1871 break; 1872 default: 1873 pr_warn("%s: Invalid UHS-I mode selected\n", 1874 mmc_hostname(host->mmc)); 1875 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1876 break; 1877 } 1878 return preset; 1879 } 1880 1881 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock, 1882 unsigned int *actual_clock) 1883 { 1884 int div = 0; /* Initialized for compiler warning */ 1885 int real_div = div, clk_mul = 1; 1886 u16 clk = 0; 1887 bool switch_base_clk = false; 1888 1889 if (host->version >= SDHCI_SPEC_300) { 1890 if (host->preset_enabled) { 1891 u16 pre_val; 1892 1893 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1894 pre_val = sdhci_get_preset_value(host); 1895 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val); 1896 if (host->clk_mul && 1897 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) { 1898 clk = SDHCI_PROG_CLOCK_MODE; 1899 real_div = div + 1; 1900 clk_mul = host->clk_mul; 1901 } else { 1902 real_div = max_t(int, 1, div << 1); 1903 } 1904 goto clock_set; 1905 } 1906 1907 /* 1908 * Check if the Host Controller supports Programmable Clock 1909 * Mode. 1910 */ 1911 if (host->clk_mul) { 1912 for (div = 1; div <= 1024; div++) { 1913 if ((host->max_clk * host->clk_mul / div) 1914 <= clock) 1915 break; 1916 } 1917 if ((host->max_clk * host->clk_mul / div) <= clock) { 1918 /* 1919 * Set Programmable Clock Mode in the Clock 1920 * Control register. 1921 */ 1922 clk = SDHCI_PROG_CLOCK_MODE; 1923 real_div = div; 1924 clk_mul = host->clk_mul; 1925 div--; 1926 } else { 1927 /* 1928 * Divisor can be too small to reach clock 1929 * speed requirement. Then use the base clock. 1930 */ 1931 switch_base_clk = true; 1932 } 1933 } 1934 1935 if (!host->clk_mul || switch_base_clk) { 1936 /* Version 3.00 divisors must be a multiple of 2. */ 1937 if (host->max_clk <= clock) 1938 div = 1; 1939 else { 1940 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; 1941 div += 2) { 1942 if ((host->max_clk / div) <= clock) 1943 break; 1944 } 1945 } 1946 real_div = div; 1947 div >>= 1; 1948 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN) 1949 && !div && host->max_clk <= 25000000) 1950 div = 1; 1951 } 1952 } else { 1953 /* Version 2.00 divisors must be a power of 2. */ 1954 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) { 1955 if ((host->max_clk / div) <= clock) 1956 break; 1957 } 1958 real_div = div; 1959 div >>= 1; 1960 } 1961 1962 clock_set: 1963 if (real_div) 1964 *actual_clock = (host->max_clk * clk_mul) / real_div; 1965 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; 1966 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) 1967 << SDHCI_DIVIDER_HI_SHIFT; 1968 1969 return clk; 1970 } 1971 EXPORT_SYMBOL_GPL(sdhci_calc_clk); 1972 1973 void sdhci_enable_clk(struct sdhci_host *host, u16 clk) 1974 { 1975 ktime_t timeout; 1976 1977 clk |= SDHCI_CLOCK_INT_EN; 1978 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1979 1980 /* Wait max 150 ms */ 1981 timeout = ktime_add_ms(ktime_get(), 150); 1982 while (1) { 1983 bool timedout = ktime_after(ktime_get(), timeout); 1984 1985 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1986 if (clk & SDHCI_CLOCK_INT_STABLE) 1987 break; 1988 if (timedout) { 1989 pr_err("%s: Internal clock never stabilised.\n", 1990 mmc_hostname(host->mmc)); 1991 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 1992 sdhci_dumpregs(host); 1993 return; 1994 } 1995 udelay(10); 1996 } 1997 1998 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) { 1999 clk |= SDHCI_CLOCK_PLL_EN; 2000 clk &= ~SDHCI_CLOCK_INT_STABLE; 2001 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2002 2003 /* Wait max 150 ms */ 2004 timeout = ktime_add_ms(ktime_get(), 150); 2005 while (1) { 2006 bool timedout = ktime_after(ktime_get(), timeout); 2007 2008 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2009 if (clk & SDHCI_CLOCK_INT_STABLE) 2010 break; 2011 if (timedout) { 2012 pr_err("%s: PLL clock never stabilised.\n", 2013 mmc_hostname(host->mmc)); 2014 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 2015 sdhci_dumpregs(host); 2016 return; 2017 } 2018 udelay(10); 2019 } 2020 } 2021 2022 clk |= SDHCI_CLOCK_CARD_EN; 2023 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2024 } 2025 EXPORT_SYMBOL_GPL(sdhci_enable_clk); 2026 2027 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 2028 { 2029 u16 clk; 2030 2031 host->mmc->actual_clock = 0; 2032 2033 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 2034 2035 if (clock == 0) 2036 return; 2037 2038 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); 2039 sdhci_enable_clk(host, clk); 2040 } 2041 EXPORT_SYMBOL_GPL(sdhci_set_clock); 2042 2043 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode, 2044 unsigned short vdd) 2045 { 2046 struct mmc_host *mmc = host->mmc; 2047 2048 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2049 2050 if (mode != MMC_POWER_OFF) 2051 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); 2052 else 2053 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2054 } 2055 2056 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, 2057 unsigned short vdd) 2058 { 2059 u8 pwr = 0; 2060 2061 if (mode != MMC_POWER_OFF) { 2062 switch (1 << vdd) { 2063 case MMC_VDD_165_195: 2064 /* 2065 * Without a regulator, SDHCI does not support 2.0v 2066 * so we only get here if the driver deliberately 2067 * added the 2.0v range to ocr_avail. Map it to 1.8v 2068 * for the purpose of turning on the power. 2069 */ 2070 case MMC_VDD_20_21: 2071 pwr = SDHCI_POWER_180; 2072 break; 2073 case MMC_VDD_29_30: 2074 case MMC_VDD_30_31: 2075 pwr = SDHCI_POWER_300; 2076 break; 2077 case MMC_VDD_32_33: 2078 case MMC_VDD_33_34: 2079 /* 2080 * 3.4 ~ 3.6V are valid only for those platforms where it's 2081 * known that the voltage range is supported by hardware. 2082 */ 2083 case MMC_VDD_34_35: 2084 case MMC_VDD_35_36: 2085 pwr = SDHCI_POWER_330; 2086 break; 2087 default: 2088 WARN(1, "%s: Invalid vdd %#x\n", 2089 mmc_hostname(host->mmc), vdd); 2090 break; 2091 } 2092 } 2093 2094 if (host->pwr == pwr) 2095 return; 2096 2097 host->pwr = pwr; 2098 2099 if (pwr == 0) { 2100 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2101 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2102 sdhci_runtime_pm_bus_off(host); 2103 } else { 2104 /* 2105 * Spec says that we should clear the power reg before setting 2106 * a new value. Some controllers don't seem to like this though. 2107 */ 2108 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 2109 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2110 2111 /* 2112 * At least the Marvell CaFe chip gets confused if we set the 2113 * voltage and set turn on power at the same time, so set the 2114 * voltage first. 2115 */ 2116 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) 2117 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2118 2119 pwr |= SDHCI_POWER_ON; 2120 2121 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2122 2123 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2124 sdhci_runtime_pm_bus_on(host); 2125 2126 /* 2127 * Some controllers need an extra 10ms delay of 10ms before 2128 * they can apply clock after applying power 2129 */ 2130 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 2131 mdelay(10); 2132 } 2133 } 2134 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg); 2135 2136 void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 2137 unsigned short vdd) 2138 { 2139 if (IS_ERR(host->mmc->supply.vmmc)) 2140 sdhci_set_power_noreg(host, mode, vdd); 2141 else 2142 sdhci_set_power_reg(host, mode, vdd); 2143 } 2144 EXPORT_SYMBOL_GPL(sdhci_set_power); 2145 2146 /* 2147 * Some controllers need to configure a valid bus voltage on their power 2148 * register regardless of whether an external regulator is taking care of power 2149 * supply. This helper function takes care of it if set as the controller's 2150 * sdhci_ops.set_power callback. 2151 */ 2152 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host, 2153 unsigned char mode, 2154 unsigned short vdd) 2155 { 2156 if (!IS_ERR(host->mmc->supply.vmmc)) { 2157 struct mmc_host *mmc = host->mmc; 2158 2159 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2160 } 2161 sdhci_set_power_noreg(host, mode, vdd); 2162 } 2163 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage); 2164 2165 /*****************************************************************************\ 2166 * * 2167 * MMC callbacks * 2168 * * 2169 \*****************************************************************************/ 2170 2171 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 2172 { 2173 struct sdhci_host *host = mmc_priv(mmc); 2174 struct mmc_command *cmd; 2175 unsigned long flags; 2176 bool present; 2177 2178 /* Firstly check card presence */ 2179 present = mmc->ops->get_cd(mmc); 2180 2181 spin_lock_irqsave(&host->lock, flags); 2182 2183 sdhci_led_activate(host); 2184 2185 if (sdhci_present_error(host, mrq->cmd, present)) 2186 goto out_finish; 2187 2188 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2189 2190 if (!sdhci_send_command_retry(host, cmd, flags)) 2191 goto out_finish; 2192 2193 spin_unlock_irqrestore(&host->lock, flags); 2194 2195 return; 2196 2197 out_finish: 2198 sdhci_finish_mrq(host, mrq); 2199 spin_unlock_irqrestore(&host->lock, flags); 2200 } 2201 EXPORT_SYMBOL_GPL(sdhci_request); 2202 2203 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq) 2204 { 2205 struct sdhci_host *host = mmc_priv(mmc); 2206 struct mmc_command *cmd; 2207 unsigned long flags; 2208 int ret = 0; 2209 2210 spin_lock_irqsave(&host->lock, flags); 2211 2212 if (sdhci_present_error(host, mrq->cmd, true)) { 2213 sdhci_finish_mrq(host, mrq); 2214 goto out_finish; 2215 } 2216 2217 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2218 2219 /* 2220 * The HSQ may send a command in interrupt context without polling 2221 * the busy signaling, which means we should return BUSY if controller 2222 * has not released inhibit bits to allow HSQ trying to send request 2223 * again in non-atomic context. So we should not finish this request 2224 * here. 2225 */ 2226 if (!sdhci_send_command(host, cmd)) 2227 ret = -EBUSY; 2228 else 2229 sdhci_led_activate(host); 2230 2231 out_finish: 2232 spin_unlock_irqrestore(&host->lock, flags); 2233 return ret; 2234 } 2235 EXPORT_SYMBOL_GPL(sdhci_request_atomic); 2236 2237 void sdhci_set_bus_width(struct sdhci_host *host, int width) 2238 { 2239 u8 ctrl; 2240 2241 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2242 if (width == MMC_BUS_WIDTH_8) { 2243 ctrl &= ~SDHCI_CTRL_4BITBUS; 2244 ctrl |= SDHCI_CTRL_8BITBUS; 2245 } else { 2246 if (host->mmc->caps & MMC_CAP_8_BIT_DATA) 2247 ctrl &= ~SDHCI_CTRL_8BITBUS; 2248 if (width == MMC_BUS_WIDTH_4) 2249 ctrl |= SDHCI_CTRL_4BITBUS; 2250 else 2251 ctrl &= ~SDHCI_CTRL_4BITBUS; 2252 } 2253 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2254 } 2255 EXPORT_SYMBOL_GPL(sdhci_set_bus_width); 2256 2257 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 2258 { 2259 u16 ctrl_2; 2260 2261 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2262 /* Select Bus Speed Mode for host */ 2263 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 2264 if ((timing == MMC_TIMING_MMC_HS200) || 2265 (timing == MMC_TIMING_UHS_SDR104)) 2266 ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 2267 else if (timing == MMC_TIMING_UHS_SDR12) 2268 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 2269 else if (timing == MMC_TIMING_UHS_SDR25) 2270 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 2271 else if (timing == MMC_TIMING_UHS_SDR50) 2272 ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 2273 else if ((timing == MMC_TIMING_UHS_DDR50) || 2274 (timing == MMC_TIMING_MMC_DDR52)) 2275 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 2276 else if (timing == MMC_TIMING_MMC_HS400) 2277 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ 2278 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2279 } 2280 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 2281 2282 static bool sdhci_timing_has_preset(unsigned char timing) 2283 { 2284 switch (timing) { 2285 case MMC_TIMING_UHS_SDR12: 2286 case MMC_TIMING_UHS_SDR25: 2287 case MMC_TIMING_UHS_SDR50: 2288 case MMC_TIMING_UHS_SDR104: 2289 case MMC_TIMING_UHS_DDR50: 2290 case MMC_TIMING_MMC_DDR52: 2291 return true; 2292 }; 2293 return false; 2294 } 2295 2296 static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing) 2297 { 2298 return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && 2299 sdhci_timing_has_preset(timing); 2300 } 2301 2302 static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios) 2303 { 2304 /* 2305 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK 2306 * Frequency. Check if preset values need to be enabled, or the Driver 2307 * Strength needs updating. Note, clock changes are handled separately. 2308 */ 2309 return !host->preset_enabled && 2310 (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type); 2311 } 2312 2313 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 2314 { 2315 struct sdhci_host *host = mmc_priv(mmc); 2316 bool reinit_uhs = host->reinit_uhs; 2317 bool turning_on_clk = false; 2318 u8 ctrl; 2319 2320 host->reinit_uhs = false; 2321 2322 if (ios->power_mode == MMC_POWER_UNDEFINED) 2323 return; 2324 2325 if (host->flags & SDHCI_DEVICE_DEAD) { 2326 if (!IS_ERR(mmc->supply.vmmc) && 2327 ios->power_mode == MMC_POWER_OFF) 2328 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 2329 return; 2330 } 2331 2332 /* 2333 * Reset the chip on each power off. 2334 * Should clear out any weird states. 2335 */ 2336 if (ios->power_mode == MMC_POWER_OFF) { 2337 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 2338 sdhci_reinit(host); 2339 } 2340 2341 if (host->version >= SDHCI_SPEC_300 && 2342 (ios->power_mode == MMC_POWER_UP) && 2343 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) 2344 sdhci_enable_preset_value(host, false); 2345 2346 if (!ios->clock || ios->clock != host->clock) { 2347 turning_on_clk = ios->clock && !host->clock; 2348 2349 host->ops->set_clock(host, ios->clock); 2350 host->clock = ios->clock; 2351 2352 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && 2353 host->clock) { 2354 host->timeout_clk = mmc->actual_clock ? 2355 mmc->actual_clock / 1000 : 2356 host->clock / 1000; 2357 mmc->max_busy_timeout = 2358 host->ops->get_max_timeout_count ? 2359 host->ops->get_max_timeout_count(host) : 2360 1 << 27; 2361 mmc->max_busy_timeout /= host->timeout_clk; 2362 } 2363 } 2364 2365 if (host->ops->set_power) 2366 host->ops->set_power(host, ios->power_mode, ios->vdd); 2367 else 2368 sdhci_set_power(host, ios->power_mode, ios->vdd); 2369 2370 if (host->ops->platform_send_init_74_clocks) 2371 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 2372 2373 host->ops->set_bus_width(host, ios->bus_width); 2374 2375 /* 2376 * Special case to avoid multiple clock changes during voltage 2377 * switching. 2378 */ 2379 if (!reinit_uhs && 2380 turning_on_clk && 2381 host->timing == ios->timing && 2382 host->version >= SDHCI_SPEC_300 && 2383 !sdhci_presetable_values_change(host, ios)) 2384 return; 2385 2386 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2387 2388 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) { 2389 if (ios->timing == MMC_TIMING_SD_HS || 2390 ios->timing == MMC_TIMING_MMC_HS || 2391 ios->timing == MMC_TIMING_MMC_HS400 || 2392 ios->timing == MMC_TIMING_MMC_HS200 || 2393 ios->timing == MMC_TIMING_MMC_DDR52 || 2394 ios->timing == MMC_TIMING_UHS_SDR50 || 2395 ios->timing == MMC_TIMING_UHS_SDR104 || 2396 ios->timing == MMC_TIMING_UHS_DDR50 || 2397 ios->timing == MMC_TIMING_UHS_SDR25) 2398 ctrl |= SDHCI_CTRL_HISPD; 2399 else 2400 ctrl &= ~SDHCI_CTRL_HISPD; 2401 } 2402 2403 if (host->version >= SDHCI_SPEC_300) { 2404 u16 clk, ctrl_2; 2405 2406 /* 2407 * According to SDHCI Spec v3.00, if the Preset Value 2408 * Enable in the Host Control 2 register is set, we 2409 * need to reset SD Clock Enable before changing High 2410 * Speed Enable to avoid generating clock glitches. 2411 */ 2412 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2413 if (clk & SDHCI_CLOCK_CARD_EN) { 2414 clk &= ~SDHCI_CLOCK_CARD_EN; 2415 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2416 } 2417 2418 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2419 2420 if (!host->preset_enabled) { 2421 /* 2422 * We only need to set Driver Strength if the 2423 * preset value enable is not set. 2424 */ 2425 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2426 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; 2427 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) 2428 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; 2429 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B) 2430 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2431 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) 2432 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; 2433 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D) 2434 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D; 2435 else { 2436 pr_warn("%s: invalid driver type, default to driver type B\n", 2437 mmc_hostname(mmc)); 2438 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2439 } 2440 2441 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2442 host->drv_type = ios->drv_type; 2443 } 2444 2445 host->ops->set_uhs_signaling(host, ios->timing); 2446 host->timing = ios->timing; 2447 2448 if (sdhci_preset_needed(host, ios->timing)) { 2449 u16 preset; 2450 2451 sdhci_enable_preset_value(host, true); 2452 preset = sdhci_get_preset_value(host); 2453 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK, 2454 preset); 2455 host->drv_type = ios->drv_type; 2456 } 2457 2458 /* Re-enable SD Clock */ 2459 host->ops->set_clock(host, host->clock); 2460 } else 2461 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2462 } 2463 EXPORT_SYMBOL_GPL(sdhci_set_ios); 2464 2465 static int sdhci_get_cd(struct mmc_host *mmc) 2466 { 2467 struct sdhci_host *host = mmc_priv(mmc); 2468 int gpio_cd = mmc_gpio_get_cd(mmc); 2469 2470 if (host->flags & SDHCI_DEVICE_DEAD) 2471 return 0; 2472 2473 /* If nonremovable, assume that the card is always present. */ 2474 if (!mmc_card_is_removable(mmc)) 2475 return 1; 2476 2477 /* 2478 * Try slot gpio detect, if defined it take precedence 2479 * over build in controller functionality 2480 */ 2481 if (gpio_cd >= 0) 2482 return !!gpio_cd; 2483 2484 /* If polling, assume that the card is always present. */ 2485 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2486 return 1; 2487 2488 /* Host native card detect */ 2489 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2490 } 2491 2492 int sdhci_get_cd_nogpio(struct mmc_host *mmc) 2493 { 2494 struct sdhci_host *host = mmc_priv(mmc); 2495 unsigned long flags; 2496 int ret = 0; 2497 2498 spin_lock_irqsave(&host->lock, flags); 2499 2500 if (host->flags & SDHCI_DEVICE_DEAD) 2501 goto out; 2502 2503 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2504 out: 2505 spin_unlock_irqrestore(&host->lock, flags); 2506 2507 return ret; 2508 } 2509 EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio); 2510 2511 static int sdhci_check_ro(struct sdhci_host *host) 2512 { 2513 unsigned long flags; 2514 int is_readonly; 2515 2516 spin_lock_irqsave(&host->lock, flags); 2517 2518 if (host->flags & SDHCI_DEVICE_DEAD) 2519 is_readonly = 0; 2520 else if (host->ops->get_ro) 2521 is_readonly = host->ops->get_ro(host); 2522 else if (mmc_can_gpio_ro(host->mmc)) 2523 is_readonly = mmc_gpio_get_ro(host->mmc); 2524 else 2525 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) 2526 & SDHCI_WRITE_PROTECT); 2527 2528 spin_unlock_irqrestore(&host->lock, flags); 2529 2530 /* This quirk needs to be replaced by a callback-function later */ 2531 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ? 2532 !is_readonly : is_readonly; 2533 } 2534 2535 #define SAMPLE_COUNT 5 2536 2537 static int sdhci_get_ro(struct mmc_host *mmc) 2538 { 2539 struct sdhci_host *host = mmc_priv(mmc); 2540 int i, ro_count; 2541 2542 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) 2543 return sdhci_check_ro(host); 2544 2545 ro_count = 0; 2546 for (i = 0; i < SAMPLE_COUNT; i++) { 2547 if (sdhci_check_ro(host)) { 2548 if (++ro_count > SAMPLE_COUNT / 2) 2549 return 1; 2550 } 2551 msleep(30); 2552 } 2553 return 0; 2554 } 2555 2556 static void sdhci_hw_reset(struct mmc_host *mmc) 2557 { 2558 struct sdhci_host *host = mmc_priv(mmc); 2559 2560 if (host->ops && host->ops->hw_reset) 2561 host->ops->hw_reset(host); 2562 } 2563 2564 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 2565 { 2566 if (!(host->flags & SDHCI_DEVICE_DEAD)) { 2567 if (enable) 2568 host->ier |= SDHCI_INT_CARD_INT; 2569 else 2570 host->ier &= ~SDHCI_INT_CARD_INT; 2571 2572 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2573 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2574 } 2575 } 2576 2577 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 2578 { 2579 struct sdhci_host *host = mmc_priv(mmc); 2580 unsigned long flags; 2581 2582 if (enable) 2583 pm_runtime_get_noresume(mmc_dev(mmc)); 2584 2585 spin_lock_irqsave(&host->lock, flags); 2586 sdhci_enable_sdio_irq_nolock(host, enable); 2587 spin_unlock_irqrestore(&host->lock, flags); 2588 2589 if (!enable) 2590 pm_runtime_put_noidle(mmc_dev(mmc)); 2591 } 2592 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2593 2594 static void sdhci_ack_sdio_irq(struct mmc_host *mmc) 2595 { 2596 struct sdhci_host *host = mmc_priv(mmc); 2597 unsigned long flags; 2598 2599 spin_lock_irqsave(&host->lock, flags); 2600 sdhci_enable_sdio_irq_nolock(host, true); 2601 spin_unlock_irqrestore(&host->lock, flags); 2602 } 2603 2604 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 2605 struct mmc_ios *ios) 2606 { 2607 struct sdhci_host *host = mmc_priv(mmc); 2608 u16 ctrl; 2609 int ret; 2610 2611 /* 2612 * Signal Voltage Switching is only applicable for Host Controllers 2613 * v3.00 and above. 2614 */ 2615 if (host->version < SDHCI_SPEC_300) 2616 return 0; 2617 2618 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2619 2620 switch (ios->signal_voltage) { 2621 case MMC_SIGNAL_VOLTAGE_330: 2622 if (!(host->flags & SDHCI_SIGNALING_330)) 2623 return -EINVAL; 2624 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 2625 ctrl &= ~SDHCI_CTRL_VDD_180; 2626 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2627 2628 if (!IS_ERR(mmc->supply.vqmmc)) { 2629 ret = mmc_regulator_set_vqmmc(mmc, ios); 2630 if (ret < 0) { 2631 pr_warn("%s: Switching to 3.3V signalling voltage failed\n", 2632 mmc_hostname(mmc)); 2633 return -EIO; 2634 } 2635 } 2636 /* Wait for 5ms */ 2637 usleep_range(5000, 5500); 2638 2639 /* 3.3V regulator output should be stable within 5 ms */ 2640 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2641 if (!(ctrl & SDHCI_CTRL_VDD_180)) 2642 return 0; 2643 2644 pr_warn("%s: 3.3V regulator output did not become stable\n", 2645 mmc_hostname(mmc)); 2646 2647 return -EAGAIN; 2648 case MMC_SIGNAL_VOLTAGE_180: 2649 if (!(host->flags & SDHCI_SIGNALING_180)) 2650 return -EINVAL; 2651 if (!IS_ERR(mmc->supply.vqmmc)) { 2652 ret = mmc_regulator_set_vqmmc(mmc, ios); 2653 if (ret < 0) { 2654 pr_warn("%s: Switching to 1.8V signalling voltage failed\n", 2655 mmc_hostname(mmc)); 2656 return -EIO; 2657 } 2658 } 2659 2660 /* 2661 * Enable 1.8V Signal Enable in the Host Control2 2662 * register 2663 */ 2664 ctrl |= SDHCI_CTRL_VDD_180; 2665 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2666 2667 /* Some controller need to do more when switching */ 2668 if (host->ops->voltage_switch) 2669 host->ops->voltage_switch(host); 2670 2671 /* 1.8V regulator output should be stable within 5 ms */ 2672 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2673 if (ctrl & SDHCI_CTRL_VDD_180) 2674 return 0; 2675 2676 pr_warn("%s: 1.8V regulator output did not become stable\n", 2677 mmc_hostname(mmc)); 2678 2679 return -EAGAIN; 2680 case MMC_SIGNAL_VOLTAGE_120: 2681 if (!(host->flags & SDHCI_SIGNALING_120)) 2682 return -EINVAL; 2683 if (!IS_ERR(mmc->supply.vqmmc)) { 2684 ret = mmc_regulator_set_vqmmc(mmc, ios); 2685 if (ret < 0) { 2686 pr_warn("%s: Switching to 1.2V signalling voltage failed\n", 2687 mmc_hostname(mmc)); 2688 return -EIO; 2689 } 2690 } 2691 return 0; 2692 default: 2693 /* No signal voltage switch required */ 2694 return 0; 2695 } 2696 } 2697 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch); 2698 2699 static int sdhci_card_busy(struct mmc_host *mmc) 2700 { 2701 struct sdhci_host *host = mmc_priv(mmc); 2702 u32 present_state; 2703 2704 /* Check whether DAT[0] is 0 */ 2705 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 2706 2707 return !(present_state & SDHCI_DATA_0_LVL_MASK); 2708 } 2709 2710 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 2711 { 2712 struct sdhci_host *host = mmc_priv(mmc); 2713 unsigned long flags; 2714 2715 spin_lock_irqsave(&host->lock, flags); 2716 host->flags |= SDHCI_HS400_TUNING; 2717 spin_unlock_irqrestore(&host->lock, flags); 2718 2719 return 0; 2720 } 2721 2722 void sdhci_start_tuning(struct sdhci_host *host) 2723 { 2724 u16 ctrl; 2725 2726 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2727 ctrl |= SDHCI_CTRL_EXEC_TUNING; 2728 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND) 2729 ctrl |= SDHCI_CTRL_TUNED_CLK; 2730 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2731 2732 /* 2733 * As per the Host Controller spec v3.00, tuning command 2734 * generates Buffer Read Ready interrupt, so enable that. 2735 * 2736 * Note: The spec clearly says that when tuning sequence 2737 * is being performed, the controller does not generate 2738 * interrupts other than Buffer Read Ready interrupt. But 2739 * to make sure we don't hit a controller bug, we _only_ 2740 * enable Buffer Read Ready interrupt here. 2741 */ 2742 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 2743 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 2744 } 2745 EXPORT_SYMBOL_GPL(sdhci_start_tuning); 2746 2747 void sdhci_end_tuning(struct sdhci_host *host) 2748 { 2749 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2750 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2751 } 2752 EXPORT_SYMBOL_GPL(sdhci_end_tuning); 2753 2754 void sdhci_reset_tuning(struct sdhci_host *host) 2755 { 2756 u16 ctrl; 2757 2758 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2759 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2760 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2761 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2762 } 2763 EXPORT_SYMBOL_GPL(sdhci_reset_tuning); 2764 2765 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) 2766 { 2767 sdhci_reset_tuning(host); 2768 2769 sdhci_reset_for(host, TUNING_ABORT); 2770 2771 sdhci_end_tuning(host); 2772 2773 mmc_send_abort_tuning(host->mmc, opcode); 2774 } 2775 EXPORT_SYMBOL_GPL(sdhci_abort_tuning); 2776 2777 /* 2778 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI 2779 * tuning command does not have a data payload (or rather the hardware does it 2780 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command 2781 * interrupt setup is different to other commands and there is no timeout 2782 * interrupt so special handling is needed. 2783 */ 2784 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) 2785 { 2786 struct mmc_host *mmc = host->mmc; 2787 struct mmc_command cmd = {}; 2788 struct mmc_request mrq = {}; 2789 unsigned long flags; 2790 u32 b = host->sdma_boundary; 2791 2792 spin_lock_irqsave(&host->lock, flags); 2793 2794 cmd.opcode = opcode; 2795 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2796 cmd.mrq = &mrq; 2797 2798 mrq.cmd = &cmd; 2799 /* 2800 * In response to CMD19, the card sends 64 bytes of tuning 2801 * block to the Host Controller. So we set the block size 2802 * to 64 here. 2803 */ 2804 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && 2805 mmc->ios.bus_width == MMC_BUS_WIDTH_8) 2806 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); 2807 else 2808 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); 2809 2810 /* 2811 * The tuning block is sent by the card to the host controller. 2812 * So we set the TRNS_READ bit in the Transfer Mode register. 2813 * This also takes care of setting DMA Enable and Multi Block 2814 * Select in the same register to 0. 2815 */ 2816 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 2817 2818 if (!sdhci_send_command_retry(host, &cmd, flags)) { 2819 spin_unlock_irqrestore(&host->lock, flags); 2820 host->tuning_done = 0; 2821 return; 2822 } 2823 2824 host->cmd = NULL; 2825 2826 sdhci_del_timer(host, &mrq); 2827 2828 host->tuning_done = 0; 2829 2830 spin_unlock_irqrestore(&host->lock, flags); 2831 2832 /* Wait for Buffer Read Ready interrupt */ 2833 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2834 msecs_to_jiffies(50)); 2835 2836 } 2837 EXPORT_SYMBOL_GPL(sdhci_send_tuning); 2838 2839 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2840 { 2841 int i; 2842 2843 /* 2844 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number 2845 * of loops reaches tuning loop count. 2846 */ 2847 for (i = 0; i < host->tuning_loop_count; i++) { 2848 u16 ctrl; 2849 2850 sdhci_send_tuning(host, opcode); 2851 2852 if (!host->tuning_done) { 2853 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", 2854 mmc_hostname(host->mmc)); 2855 sdhci_abort_tuning(host, opcode); 2856 return -ETIMEDOUT; 2857 } 2858 2859 /* Spec does not require a delay between tuning cycles */ 2860 if (host->tuning_delay > 0) 2861 mdelay(host->tuning_delay); 2862 2863 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2864 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { 2865 if (ctrl & SDHCI_CTRL_TUNED_CLK) 2866 return 0; /* Success! */ 2867 break; 2868 } 2869 2870 } 2871 2872 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2873 mmc_hostname(host->mmc)); 2874 sdhci_reset_tuning(host); 2875 return -EAGAIN; 2876 } 2877 2878 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2879 { 2880 struct sdhci_host *host = mmc_priv(mmc); 2881 int err = 0; 2882 unsigned int tuning_count = 0; 2883 bool hs400_tuning; 2884 2885 hs400_tuning = host->flags & SDHCI_HS400_TUNING; 2886 2887 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 2888 tuning_count = host->tuning_count; 2889 2890 /* 2891 * The Host Controller needs tuning in case of SDR104 and DDR50 2892 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in 2893 * the Capabilities register. 2894 * If the Host Controller supports the HS200 mode then the 2895 * tuning function has to be executed. 2896 */ 2897 switch (host->timing) { 2898 /* HS400 tuning is done in HS200 mode */ 2899 case MMC_TIMING_MMC_HS400: 2900 err = -EINVAL; 2901 goto out; 2902 2903 case MMC_TIMING_MMC_HS200: 2904 /* 2905 * Periodic re-tuning for HS400 is not expected to be needed, so 2906 * disable it here. 2907 */ 2908 if (hs400_tuning) 2909 tuning_count = 0; 2910 break; 2911 2912 case MMC_TIMING_UHS_SDR104: 2913 case MMC_TIMING_UHS_DDR50: 2914 break; 2915 2916 case MMC_TIMING_UHS_SDR50: 2917 if (host->flags & SDHCI_SDR50_NEEDS_TUNING) 2918 break; 2919 fallthrough; 2920 2921 default: 2922 goto out; 2923 } 2924 2925 if (host->ops->platform_execute_tuning) { 2926 err = host->ops->platform_execute_tuning(host, opcode); 2927 goto out; 2928 } 2929 2930 mmc->retune_period = tuning_count; 2931 2932 if (host->tuning_delay < 0) 2933 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; 2934 2935 sdhci_start_tuning(host); 2936 2937 host->tuning_err = __sdhci_execute_tuning(host, opcode); 2938 2939 sdhci_end_tuning(host); 2940 out: 2941 host->flags &= ~SDHCI_HS400_TUNING; 2942 2943 return err; 2944 } 2945 EXPORT_SYMBOL_GPL(sdhci_execute_tuning); 2946 2947 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2948 { 2949 /* Host Controller v3.00 defines preset value registers */ 2950 if (host->version < SDHCI_SPEC_300) 2951 return; 2952 2953 /* 2954 * We only enable or disable Preset Value if they are not already 2955 * enabled or disabled respectively. Otherwise, we bail out. 2956 */ 2957 if (host->preset_enabled != enable) { 2958 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2959 2960 if (enable) 2961 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2962 else 2963 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2964 2965 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2966 2967 if (enable) 2968 host->flags |= SDHCI_PV_ENABLED; 2969 else 2970 host->flags &= ~SDHCI_PV_ENABLED; 2971 2972 host->preset_enabled = enable; 2973 } 2974 } 2975 2976 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 2977 int err) 2978 { 2979 struct mmc_data *data = mrq->data; 2980 2981 if (data->host_cookie != COOKIE_UNMAPPED) 2982 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, 2983 mmc_get_dma_dir(data)); 2984 2985 data->host_cookie = COOKIE_UNMAPPED; 2986 } 2987 2988 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 2989 { 2990 struct sdhci_host *host = mmc_priv(mmc); 2991 2992 mrq->data->host_cookie = COOKIE_UNMAPPED; 2993 2994 /* 2995 * No pre-mapping in the pre hook if we're using the bounce buffer, 2996 * for that we would need two bounce buffers since one buffer is 2997 * in flight when this is getting called. 2998 */ 2999 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) 3000 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 3001 } 3002 3003 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) 3004 { 3005 if (host->data_cmd) { 3006 host->data_cmd->error = err; 3007 sdhci_finish_mrq(host, host->data_cmd->mrq); 3008 } 3009 3010 if (host->cmd) { 3011 host->cmd->error = err; 3012 sdhci_finish_mrq(host, host->cmd->mrq); 3013 } 3014 } 3015 3016 static void sdhci_card_event(struct mmc_host *mmc) 3017 { 3018 struct sdhci_host *host = mmc_priv(mmc); 3019 unsigned long flags; 3020 int present; 3021 3022 /* First check if client has provided their own card event */ 3023 if (host->ops->card_event) 3024 host->ops->card_event(host); 3025 3026 present = mmc->ops->get_cd(mmc); 3027 3028 spin_lock_irqsave(&host->lock, flags); 3029 3030 /* Check sdhci_has_requests() first in case we are runtime suspended */ 3031 if (sdhci_has_requests(host) && !present) { 3032 pr_err("%s: Card removed during transfer!\n", 3033 mmc_hostname(mmc)); 3034 pr_err("%s: Resetting controller.\n", 3035 mmc_hostname(mmc)); 3036 3037 sdhci_reset_for(host, CARD_REMOVED); 3038 3039 sdhci_error_out_mrqs(host, -ENOMEDIUM); 3040 } 3041 3042 spin_unlock_irqrestore(&host->lock, flags); 3043 } 3044 3045 static const struct mmc_host_ops sdhci_ops = { 3046 .request = sdhci_request, 3047 .post_req = sdhci_post_req, 3048 .pre_req = sdhci_pre_req, 3049 .set_ios = sdhci_set_ios, 3050 .get_cd = sdhci_get_cd, 3051 .get_ro = sdhci_get_ro, 3052 .card_hw_reset = sdhci_hw_reset, 3053 .enable_sdio_irq = sdhci_enable_sdio_irq, 3054 .ack_sdio_irq = sdhci_ack_sdio_irq, 3055 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 3056 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 3057 .execute_tuning = sdhci_execute_tuning, 3058 .card_event = sdhci_card_event, 3059 .card_busy = sdhci_card_busy, 3060 }; 3061 3062 /*****************************************************************************\ 3063 * * 3064 * Request done * 3065 * * 3066 \*****************************************************************************/ 3067 3068 static bool sdhci_request_done(struct sdhci_host *host) 3069 { 3070 unsigned long flags; 3071 struct mmc_request *mrq; 3072 int i; 3073 3074 spin_lock_irqsave(&host->lock, flags); 3075 3076 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3077 mrq = host->mrqs_done[i]; 3078 if (mrq) 3079 break; 3080 } 3081 3082 if (!mrq) { 3083 spin_unlock_irqrestore(&host->lock, flags); 3084 return true; 3085 } 3086 3087 /* 3088 * The controller needs a reset of internal state machines 3089 * upon error conditions. 3090 */ 3091 if (sdhci_needs_reset(host, mrq)) { 3092 /* 3093 * Do not finish until command and data lines are available for 3094 * reset. Note there can only be one other mrq, so it cannot 3095 * also be in mrqs_done, otherwise host->cmd and host->data_cmd 3096 * would both be null. 3097 */ 3098 if (host->cmd || host->data_cmd) { 3099 spin_unlock_irqrestore(&host->lock, flags); 3100 return true; 3101 } 3102 3103 /* Some controllers need this kick or reset won't work here */ 3104 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 3105 /* This is to force an update */ 3106 host->ops->set_clock(host, host->clock); 3107 3108 sdhci_reset_for(host, REQUEST_ERROR); 3109 3110 host->pending_reset = false; 3111 } 3112 3113 /* 3114 * Always unmap the data buffers if they were mapped by 3115 * sdhci_prepare_data() whenever we finish with a request. 3116 * This avoids leaking DMA mappings on error. 3117 */ 3118 if (host->flags & SDHCI_REQ_USE_DMA) { 3119 struct mmc_data *data = mrq->data; 3120 3121 if (host->use_external_dma && data && 3122 (mrq->cmd->error || data->error)) { 3123 struct dma_chan *chan = sdhci_external_dma_channel(host, data); 3124 3125 host->mrqs_done[i] = NULL; 3126 spin_unlock_irqrestore(&host->lock, flags); 3127 dmaengine_terminate_sync(chan); 3128 spin_lock_irqsave(&host->lock, flags); 3129 sdhci_set_mrq_done(host, mrq); 3130 } 3131 3132 if (data && data->host_cookie == COOKIE_MAPPED) { 3133 if (host->bounce_buffer) { 3134 /* 3135 * On reads, copy the bounced data into the 3136 * sglist 3137 */ 3138 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 3139 unsigned int length = data->bytes_xfered; 3140 3141 if (length > host->bounce_buffer_size) { 3142 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 3143 mmc_hostname(host->mmc), 3144 host->bounce_buffer_size, 3145 data->bytes_xfered); 3146 /* Cap it down and continue */ 3147 length = host->bounce_buffer_size; 3148 } 3149 dma_sync_single_for_cpu( 3150 mmc_dev(host->mmc), 3151 host->bounce_addr, 3152 host->bounce_buffer_size, 3153 DMA_FROM_DEVICE); 3154 sg_copy_from_buffer(data->sg, 3155 data->sg_len, 3156 host->bounce_buffer, 3157 length); 3158 } else { 3159 /* No copying, just switch ownership */ 3160 dma_sync_single_for_cpu( 3161 mmc_dev(host->mmc), 3162 host->bounce_addr, 3163 host->bounce_buffer_size, 3164 mmc_get_dma_dir(data)); 3165 } 3166 } else { 3167 /* Unmap the raw data */ 3168 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 3169 data->sg_len, 3170 mmc_get_dma_dir(data)); 3171 } 3172 data->host_cookie = COOKIE_UNMAPPED; 3173 } 3174 } 3175 3176 host->mrqs_done[i] = NULL; 3177 3178 spin_unlock_irqrestore(&host->lock, flags); 3179 3180 if (host->ops->request_done) 3181 host->ops->request_done(host, mrq); 3182 else 3183 mmc_request_done(host->mmc, mrq); 3184 3185 return false; 3186 } 3187 3188 static void sdhci_complete_work(struct work_struct *work) 3189 { 3190 struct sdhci_host *host = container_of(work, struct sdhci_host, 3191 complete_work); 3192 3193 while (!sdhci_request_done(host)) 3194 ; 3195 } 3196 3197 static void sdhci_timeout_timer(struct timer_list *t) 3198 { 3199 struct sdhci_host *host; 3200 unsigned long flags; 3201 3202 host = from_timer(host, t, timer); 3203 3204 spin_lock_irqsave(&host->lock, flags); 3205 3206 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { 3207 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", 3208 mmc_hostname(host->mmc)); 3209 sdhci_err_stats_inc(host, REQ_TIMEOUT); 3210 sdhci_dumpregs(host); 3211 3212 host->cmd->error = -ETIMEDOUT; 3213 sdhci_finish_mrq(host, host->cmd->mrq); 3214 } 3215 3216 spin_unlock_irqrestore(&host->lock, flags); 3217 } 3218 3219 static void sdhci_timeout_data_timer(struct timer_list *t) 3220 { 3221 struct sdhci_host *host; 3222 unsigned long flags; 3223 3224 host = from_timer(host, t, data_timer); 3225 3226 spin_lock_irqsave(&host->lock, flags); 3227 3228 if (host->data || host->data_cmd || 3229 (host->cmd && sdhci_data_line_cmd(host->cmd))) { 3230 pr_err("%s: Timeout waiting for hardware interrupt.\n", 3231 mmc_hostname(host->mmc)); 3232 sdhci_err_stats_inc(host, REQ_TIMEOUT); 3233 sdhci_dumpregs(host); 3234 3235 if (host->data) { 3236 host->data->error = -ETIMEDOUT; 3237 __sdhci_finish_data(host, true); 3238 queue_work(host->complete_wq, &host->complete_work); 3239 } else if (host->data_cmd) { 3240 host->data_cmd->error = -ETIMEDOUT; 3241 sdhci_finish_mrq(host, host->data_cmd->mrq); 3242 } else { 3243 host->cmd->error = -ETIMEDOUT; 3244 sdhci_finish_mrq(host, host->cmd->mrq); 3245 } 3246 } 3247 3248 spin_unlock_irqrestore(&host->lock, flags); 3249 } 3250 3251 /*****************************************************************************\ 3252 * * 3253 * Interrupt handling * 3254 * * 3255 \*****************************************************************************/ 3256 3257 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) 3258 { 3259 /* Handle auto-CMD12 error */ 3260 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { 3261 struct mmc_request *mrq = host->data_cmd->mrq; 3262 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3263 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 3264 SDHCI_INT_DATA_TIMEOUT : 3265 SDHCI_INT_DATA_CRC; 3266 3267 /* Treat auto-CMD12 error the same as data error */ 3268 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { 3269 *intmask_p |= data_err_bit; 3270 return; 3271 } 3272 } 3273 3274 if (!host->cmd) { 3275 /* 3276 * SDHCI recovers from errors by resetting the cmd and data 3277 * circuits. Until that is done, there very well might be more 3278 * interrupts, so ignore them in that case. 3279 */ 3280 if (host->pending_reset) 3281 return; 3282 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 3283 mmc_hostname(host->mmc), (unsigned)intmask); 3284 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3285 sdhci_dumpregs(host); 3286 return; 3287 } 3288 3289 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 3290 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 3291 if (intmask & SDHCI_INT_TIMEOUT) { 3292 host->cmd->error = -ETIMEDOUT; 3293 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3294 } else { 3295 host->cmd->error = -EILSEQ; 3296 if (!mmc_op_tuning(host->cmd->opcode)) 3297 sdhci_err_stats_inc(host, CMD_CRC); 3298 } 3299 /* Treat data command CRC error the same as data CRC error */ 3300 if (host->cmd->data && 3301 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 3302 SDHCI_INT_CRC) { 3303 host->cmd = NULL; 3304 *intmask_p |= SDHCI_INT_DATA_CRC; 3305 return; 3306 } 3307 3308 __sdhci_finish_mrq(host, host->cmd->mrq); 3309 return; 3310 } 3311 3312 /* Handle auto-CMD23 error */ 3313 if (intmask & SDHCI_INT_AUTO_CMD_ERR) { 3314 struct mmc_request *mrq = host->cmd->mrq; 3315 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3316 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 3317 -ETIMEDOUT : 3318 -EILSEQ; 3319 3320 sdhci_err_stats_inc(host, AUTO_CMD); 3321 3322 if (sdhci_auto_cmd23(host, mrq)) { 3323 mrq->sbc->error = err; 3324 __sdhci_finish_mrq(host, mrq); 3325 return; 3326 } 3327 } 3328 3329 if (intmask & SDHCI_INT_RESPONSE) 3330 sdhci_finish_command(host); 3331 } 3332 3333 static void sdhci_adma_show_error(struct sdhci_host *host) 3334 { 3335 void *desc = host->adma_table; 3336 dma_addr_t dma = host->adma_addr; 3337 3338 sdhci_dumpregs(host); 3339 3340 while (true) { 3341 struct sdhci_adma2_64_desc *dma_desc = desc; 3342 3343 if (host->flags & SDHCI_USE_64_BIT_DMA) 3344 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 3345 (unsigned long long)dma, 3346 le32_to_cpu(dma_desc->addr_hi), 3347 le32_to_cpu(dma_desc->addr_lo), 3348 le16_to_cpu(dma_desc->len), 3349 le16_to_cpu(dma_desc->cmd)); 3350 else 3351 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 3352 (unsigned long long)dma, 3353 le32_to_cpu(dma_desc->addr_lo), 3354 le16_to_cpu(dma_desc->len), 3355 le16_to_cpu(dma_desc->cmd)); 3356 3357 desc += host->desc_sz; 3358 dma += host->desc_sz; 3359 3360 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 3361 break; 3362 } 3363 } 3364 3365 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 3366 { 3367 /* 3368 * CMD19 generates _only_ Buffer Read Ready interrupt if 3369 * use sdhci_send_tuning. 3370 * Need to exclude this case: PIO mode and use mmc_send_tuning, 3371 * If not, sdhci_transfer_pio will never be called, make the 3372 * SDHCI_INT_DATA_AVAIL always there, stuck in irq storm. 3373 */ 3374 if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) { 3375 if (mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) { 3376 host->tuning_done = 1; 3377 wake_up(&host->buf_ready_int); 3378 return; 3379 } 3380 } 3381 3382 if (!host->data) { 3383 struct mmc_command *data_cmd = host->data_cmd; 3384 3385 /* 3386 * The "data complete" interrupt is also used to 3387 * indicate that a busy state has ended. See comment 3388 * above in sdhci_cmd_irq(). 3389 */ 3390 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 3391 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3392 host->data_cmd = NULL; 3393 data_cmd->error = -ETIMEDOUT; 3394 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3395 __sdhci_finish_mrq(host, data_cmd->mrq); 3396 return; 3397 } 3398 if (intmask & SDHCI_INT_DATA_END) { 3399 host->data_cmd = NULL; 3400 /* 3401 * Some cards handle busy-end interrupt 3402 * before the command completed, so make 3403 * sure we do things in the proper order. 3404 */ 3405 if (host->cmd == data_cmd) 3406 return; 3407 3408 __sdhci_finish_mrq(host, data_cmd->mrq); 3409 return; 3410 } 3411 } 3412 3413 /* 3414 * SDHCI recovers from errors by resetting the cmd and data 3415 * circuits. Until that is done, there very well might be more 3416 * interrupts, so ignore them in that case. 3417 */ 3418 if (host->pending_reset) 3419 return; 3420 3421 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 3422 mmc_hostname(host->mmc), (unsigned)intmask); 3423 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3424 sdhci_dumpregs(host); 3425 3426 return; 3427 } 3428 3429 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3430 host->data->error = -ETIMEDOUT; 3431 sdhci_err_stats_inc(host, DAT_TIMEOUT); 3432 } else if (intmask & SDHCI_INT_DATA_END_BIT) { 3433 host->data->error = -EILSEQ; 3434 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3435 sdhci_err_stats_inc(host, DAT_CRC); 3436 } else if ((intmask & SDHCI_INT_DATA_CRC) && 3437 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 3438 != MMC_BUS_TEST_R) { 3439 host->data->error = -EILSEQ; 3440 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3441 sdhci_err_stats_inc(host, DAT_CRC); 3442 } else if (intmask & SDHCI_INT_ADMA_ERROR) { 3443 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), 3444 intmask); 3445 sdhci_adma_show_error(host); 3446 sdhci_err_stats_inc(host, ADMA); 3447 host->data->error = -EIO; 3448 if (host->ops->adma_workaround) 3449 host->ops->adma_workaround(host, intmask); 3450 } 3451 3452 if (host->data->error) 3453 sdhci_finish_data(host); 3454 else { 3455 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 3456 sdhci_transfer_pio(host); 3457 3458 /* 3459 * We currently don't do anything fancy with DMA 3460 * boundaries, but as we can't disable the feature 3461 * we need to at least restart the transfer. 3462 * 3463 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 3464 * should return a valid address to continue from, but as 3465 * some controllers are faulty, don't trust them. 3466 */ 3467 if (intmask & SDHCI_INT_DMA_END) { 3468 dma_addr_t dmastart, dmanow; 3469 3470 dmastart = sdhci_sdma_address(host); 3471 dmanow = dmastart + host->data->bytes_xfered; 3472 /* 3473 * Force update to the next DMA block boundary. 3474 */ 3475 dmanow = (dmanow & 3476 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 3477 SDHCI_DEFAULT_BOUNDARY_SIZE; 3478 host->data->bytes_xfered = dmanow - dmastart; 3479 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", 3480 &dmastart, host->data->bytes_xfered, &dmanow); 3481 sdhci_set_sdma_addr(host, dmanow); 3482 } 3483 3484 if (intmask & SDHCI_INT_DATA_END) { 3485 if (host->cmd == host->data_cmd) { 3486 /* 3487 * Data managed to finish before the 3488 * command completed. Make sure we do 3489 * things in the proper order. 3490 */ 3491 host->data_early = 1; 3492 } else { 3493 sdhci_finish_data(host); 3494 } 3495 } 3496 } 3497 } 3498 3499 static inline bool sdhci_defer_done(struct sdhci_host *host, 3500 struct mmc_request *mrq) 3501 { 3502 struct mmc_data *data = mrq->data; 3503 3504 return host->pending_reset || host->always_defer_done || 3505 ((host->flags & SDHCI_REQ_USE_DMA) && data && 3506 data->host_cookie == COOKIE_MAPPED); 3507 } 3508 3509 static irqreturn_t sdhci_irq(int irq, void *dev_id) 3510 { 3511 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0}; 3512 irqreturn_t result = IRQ_NONE; 3513 struct sdhci_host *host = dev_id; 3514 u32 intmask, mask, unexpected = 0; 3515 int max_loops = 16; 3516 int i; 3517 3518 spin_lock(&host->lock); 3519 3520 if (host->runtime_suspended) { 3521 spin_unlock(&host->lock); 3522 return IRQ_NONE; 3523 } 3524 3525 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3526 if (!intmask || intmask == 0xffffffff) { 3527 result = IRQ_NONE; 3528 goto out; 3529 } 3530 3531 do { 3532 DBG("IRQ status 0x%08x\n", intmask); 3533 3534 if (host->ops->irq) { 3535 intmask = host->ops->irq(host, intmask); 3536 if (!intmask) 3537 goto cont; 3538 } 3539 3540 /* Clear selected interrupts. */ 3541 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3542 SDHCI_INT_BUS_POWER); 3543 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3544 3545 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3546 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 3547 SDHCI_CARD_PRESENT; 3548 3549 /* 3550 * There is a observation on i.mx esdhc. INSERT 3551 * bit will be immediately set again when it gets 3552 * cleared, if a card is inserted. We have to mask 3553 * the irq to prevent interrupt storm which will 3554 * freeze the system. And the REMOVE gets the 3555 * same situation. 3556 * 3557 * More testing are needed here to ensure it works 3558 * for other platforms though. 3559 */ 3560 host->ier &= ~(SDHCI_INT_CARD_INSERT | 3561 SDHCI_INT_CARD_REMOVE); 3562 host->ier |= present ? SDHCI_INT_CARD_REMOVE : 3563 SDHCI_INT_CARD_INSERT; 3564 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3565 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3566 3567 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 3568 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 3569 3570 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 3571 SDHCI_INT_CARD_REMOVE); 3572 result = IRQ_WAKE_THREAD; 3573 } 3574 3575 if (intmask & SDHCI_INT_CMD_MASK) 3576 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask); 3577 3578 if (intmask & SDHCI_INT_DATA_MASK) 3579 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 3580 3581 if (intmask & SDHCI_INT_BUS_POWER) 3582 pr_err("%s: Card is consuming too much power!\n", 3583 mmc_hostname(host->mmc)); 3584 3585 if (intmask & SDHCI_INT_RETUNE) 3586 mmc_retune_needed(host->mmc); 3587 3588 if ((intmask & SDHCI_INT_CARD_INT) && 3589 (host->ier & SDHCI_INT_CARD_INT)) { 3590 sdhci_enable_sdio_irq_nolock(host, false); 3591 sdio_signal_irq(host->mmc); 3592 } 3593 3594 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 3595 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3596 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 3597 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); 3598 3599 if (intmask) { 3600 unexpected |= intmask; 3601 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3602 } 3603 cont: 3604 if (result == IRQ_NONE) 3605 result = IRQ_HANDLED; 3606 3607 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3608 } while (intmask && --max_loops); 3609 3610 /* Determine if mrqs can be completed immediately */ 3611 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3612 struct mmc_request *mrq = host->mrqs_done[i]; 3613 3614 if (!mrq) 3615 continue; 3616 3617 if (sdhci_defer_done(host, mrq)) { 3618 result = IRQ_WAKE_THREAD; 3619 } else { 3620 mrqs_done[i] = mrq; 3621 host->mrqs_done[i] = NULL; 3622 } 3623 } 3624 out: 3625 if (host->deferred_cmd) 3626 result = IRQ_WAKE_THREAD; 3627 3628 spin_unlock(&host->lock); 3629 3630 /* Process mrqs ready for immediate completion */ 3631 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3632 if (!mrqs_done[i]) 3633 continue; 3634 3635 if (host->ops->request_done) 3636 host->ops->request_done(host, mrqs_done[i]); 3637 else 3638 mmc_request_done(host->mmc, mrqs_done[i]); 3639 } 3640 3641 if (unexpected) { 3642 pr_err("%s: Unexpected interrupt 0x%08x.\n", 3643 mmc_hostname(host->mmc), unexpected); 3644 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3645 sdhci_dumpregs(host); 3646 } 3647 3648 return result; 3649 } 3650 3651 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3652 { 3653 struct sdhci_host *host = dev_id; 3654 struct mmc_command *cmd; 3655 unsigned long flags; 3656 u32 isr; 3657 3658 while (!sdhci_request_done(host)) 3659 ; 3660 3661 spin_lock_irqsave(&host->lock, flags); 3662 3663 isr = host->thread_isr; 3664 host->thread_isr = 0; 3665 3666 cmd = host->deferred_cmd; 3667 if (cmd && !sdhci_send_command_retry(host, cmd, flags)) 3668 sdhci_finish_mrq(host, cmd->mrq); 3669 3670 spin_unlock_irqrestore(&host->lock, flags); 3671 3672 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3673 struct mmc_host *mmc = host->mmc; 3674 3675 mmc->ops->card_event(mmc); 3676 mmc_detect_change(mmc, msecs_to_jiffies(200)); 3677 } 3678 3679 return IRQ_HANDLED; 3680 } 3681 3682 /*****************************************************************************\ 3683 * * 3684 * Suspend/resume * 3685 * * 3686 \*****************************************************************************/ 3687 3688 #ifdef CONFIG_PM 3689 3690 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host) 3691 { 3692 return mmc_card_is_removable(host->mmc) && 3693 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3694 !mmc_can_gpio_cd(host->mmc); 3695 } 3696 3697 /* 3698 * To enable wakeup events, the corresponding events have to be enabled in 3699 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal 3700 * Table' in the SD Host Controller Standard Specification. 3701 * It is useless to restore SDHCI_INT_ENABLE state in 3702 * sdhci_disable_irq_wakeups() since it will be set by 3703 * sdhci_enable_card_detection() or sdhci_init(). 3704 */ 3705 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) 3706 { 3707 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | 3708 SDHCI_WAKE_ON_INT; 3709 u32 irq_val = 0; 3710 u8 wake_val = 0; 3711 u8 val; 3712 3713 if (sdhci_cd_irq_can_wakeup(host)) { 3714 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; 3715 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; 3716 } 3717 3718 if (mmc_card_wake_sdio_irq(host->mmc)) { 3719 wake_val |= SDHCI_WAKE_ON_INT; 3720 irq_val |= SDHCI_INT_CARD_INT; 3721 } 3722 3723 if (!irq_val) 3724 return false; 3725 3726 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3727 val &= ~mask; 3728 val |= wake_val; 3729 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3730 3731 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); 3732 3733 host->irq_wake_enabled = !enable_irq_wake(host->irq); 3734 3735 return host->irq_wake_enabled; 3736 } 3737 3738 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 3739 { 3740 u8 val; 3741 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 3742 | SDHCI_WAKE_ON_INT; 3743 3744 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3745 val &= ~mask; 3746 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3747 3748 disable_irq_wake(host->irq); 3749 3750 host->irq_wake_enabled = false; 3751 } 3752 3753 int sdhci_suspend_host(struct sdhci_host *host) 3754 { 3755 sdhci_disable_card_detection(host); 3756 3757 mmc_retune_timer_stop(host->mmc); 3758 3759 if (!device_may_wakeup(mmc_dev(host->mmc)) || 3760 !sdhci_enable_irq_wakeups(host)) { 3761 host->ier = 0; 3762 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3763 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3764 free_irq(host->irq, host); 3765 } 3766 3767 return 0; 3768 } 3769 3770 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 3771 3772 int sdhci_resume_host(struct sdhci_host *host) 3773 { 3774 struct mmc_host *mmc = host->mmc; 3775 int ret = 0; 3776 3777 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3778 if (host->ops->enable_dma) 3779 host->ops->enable_dma(host); 3780 } 3781 3782 if ((mmc->pm_flags & MMC_PM_KEEP_POWER) && 3783 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 3784 /* Card keeps power but host controller does not */ 3785 sdhci_init(host, 0); 3786 host->pwr = 0; 3787 host->clock = 0; 3788 host->reinit_uhs = true; 3789 mmc->ops->set_ios(mmc, &mmc->ios); 3790 } else { 3791 sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER)); 3792 } 3793 3794 if (host->irq_wake_enabled) { 3795 sdhci_disable_irq_wakeups(host); 3796 } else { 3797 ret = request_threaded_irq(host->irq, sdhci_irq, 3798 sdhci_thread_irq, IRQF_SHARED, 3799 mmc_hostname(mmc), host); 3800 if (ret) 3801 return ret; 3802 } 3803 3804 sdhci_enable_card_detection(host); 3805 3806 return ret; 3807 } 3808 3809 EXPORT_SYMBOL_GPL(sdhci_resume_host); 3810 3811 int sdhci_runtime_suspend_host(struct sdhci_host *host) 3812 { 3813 unsigned long flags; 3814 3815 mmc_retune_timer_stop(host->mmc); 3816 3817 spin_lock_irqsave(&host->lock, flags); 3818 host->ier &= SDHCI_INT_CARD_INT; 3819 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3820 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3821 spin_unlock_irqrestore(&host->lock, flags); 3822 3823 synchronize_hardirq(host->irq); 3824 3825 spin_lock_irqsave(&host->lock, flags); 3826 host->runtime_suspended = true; 3827 spin_unlock_irqrestore(&host->lock, flags); 3828 3829 return 0; 3830 } 3831 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 3832 3833 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) 3834 { 3835 struct mmc_host *mmc = host->mmc; 3836 unsigned long flags; 3837 int host_flags = host->flags; 3838 3839 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3840 if (host->ops->enable_dma) 3841 host->ops->enable_dma(host); 3842 } 3843 3844 sdhci_init(host, soft_reset); 3845 3846 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && 3847 mmc->ios.power_mode != MMC_POWER_OFF) { 3848 /* Force clock and power re-program */ 3849 host->pwr = 0; 3850 host->clock = 0; 3851 host->reinit_uhs = true; 3852 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); 3853 mmc->ops->set_ios(mmc, &mmc->ios); 3854 3855 if ((host_flags & SDHCI_PV_ENABLED) && 3856 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { 3857 spin_lock_irqsave(&host->lock, flags); 3858 sdhci_enable_preset_value(host, true); 3859 spin_unlock_irqrestore(&host->lock, flags); 3860 } 3861 3862 if ((mmc->caps2 & MMC_CAP2_HS400_ES) && 3863 mmc->ops->hs400_enhanced_strobe) 3864 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); 3865 } 3866 3867 spin_lock_irqsave(&host->lock, flags); 3868 3869 host->runtime_suspended = false; 3870 3871 /* Enable SDIO IRQ */ 3872 if (sdio_irq_claimed(mmc)) 3873 sdhci_enable_sdio_irq_nolock(host, true); 3874 3875 /* Enable Card Detection */ 3876 sdhci_enable_card_detection(host); 3877 3878 spin_unlock_irqrestore(&host->lock, flags); 3879 3880 return 0; 3881 } 3882 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 3883 3884 #endif /* CONFIG_PM */ 3885 3886 /*****************************************************************************\ 3887 * * 3888 * Command Queue Engine (CQE) helpers * 3889 * * 3890 \*****************************************************************************/ 3891 3892 void sdhci_cqe_enable(struct mmc_host *mmc) 3893 { 3894 struct sdhci_host *host = mmc_priv(mmc); 3895 unsigned long flags; 3896 u8 ctrl; 3897 3898 spin_lock_irqsave(&host->lock, flags); 3899 3900 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 3901 ctrl &= ~SDHCI_CTRL_DMA_MASK; 3902 /* 3903 * Host from V4.10 supports ADMA3 DMA type. 3904 * ADMA3 performs integrated descriptor which is more suitable 3905 * for cmd queuing to fetch both command and transfer descriptors. 3906 */ 3907 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) 3908 ctrl |= SDHCI_CTRL_ADMA3; 3909 else if (host->flags & SDHCI_USE_64_BIT_DMA) 3910 ctrl |= SDHCI_CTRL_ADMA64; 3911 else 3912 ctrl |= SDHCI_CTRL_ADMA32; 3913 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 3914 3915 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), 3916 SDHCI_BLOCK_SIZE); 3917 3918 /* Set maximum timeout */ 3919 sdhci_set_timeout(host, NULL); 3920 3921 host->ier = host->cqe_ier; 3922 3923 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3924 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3925 3926 host->cqe_on = true; 3927 3928 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", 3929 mmc_hostname(mmc), host->ier, 3930 sdhci_readl(host, SDHCI_INT_STATUS)); 3931 3932 spin_unlock_irqrestore(&host->lock, flags); 3933 } 3934 EXPORT_SYMBOL_GPL(sdhci_cqe_enable); 3935 3936 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) 3937 { 3938 struct sdhci_host *host = mmc_priv(mmc); 3939 unsigned long flags; 3940 3941 spin_lock_irqsave(&host->lock, flags); 3942 3943 sdhci_set_default_irqs(host); 3944 3945 host->cqe_on = false; 3946 3947 if (recovery) 3948 sdhci_reset_for(host, CQE_RECOVERY); 3949 3950 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", 3951 mmc_hostname(mmc), host->ier, 3952 sdhci_readl(host, SDHCI_INT_STATUS)); 3953 3954 spin_unlock_irqrestore(&host->lock, flags); 3955 } 3956 EXPORT_SYMBOL_GPL(sdhci_cqe_disable); 3957 3958 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, 3959 int *data_error) 3960 { 3961 u32 mask; 3962 3963 if (!host->cqe_on) 3964 return false; 3965 3966 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) { 3967 *cmd_error = -EILSEQ; 3968 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3969 sdhci_err_stats_inc(host, CMD_CRC); 3970 } else if (intmask & SDHCI_INT_TIMEOUT) { 3971 *cmd_error = -ETIMEDOUT; 3972 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3973 } else 3974 *cmd_error = 0; 3975 3976 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) { 3977 *data_error = -EILSEQ; 3978 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3979 sdhci_err_stats_inc(host, DAT_CRC); 3980 } else if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3981 *data_error = -ETIMEDOUT; 3982 sdhci_err_stats_inc(host, DAT_TIMEOUT); 3983 } else if (intmask & SDHCI_INT_ADMA_ERROR) { 3984 *data_error = -EIO; 3985 sdhci_err_stats_inc(host, ADMA); 3986 } else 3987 *data_error = 0; 3988 3989 /* Clear selected interrupts. */ 3990 mask = intmask & host->cqe_ier; 3991 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3992 3993 if (intmask & SDHCI_INT_BUS_POWER) 3994 pr_err("%s: Card is consuming too much power!\n", 3995 mmc_hostname(host->mmc)); 3996 3997 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR); 3998 if (intmask) { 3999 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 4000 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n", 4001 mmc_hostname(host->mmc), intmask); 4002 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 4003 sdhci_dumpregs(host); 4004 } 4005 4006 return true; 4007 } 4008 EXPORT_SYMBOL_GPL(sdhci_cqe_irq); 4009 4010 /*****************************************************************************\ 4011 * * 4012 * Device allocation/registration * 4013 * * 4014 \*****************************************************************************/ 4015 4016 struct sdhci_host *sdhci_alloc_host(struct device *dev, 4017 size_t priv_size) 4018 { 4019 struct mmc_host *mmc; 4020 struct sdhci_host *host; 4021 4022 WARN_ON(dev == NULL); 4023 4024 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 4025 if (!mmc) 4026 return ERR_PTR(-ENOMEM); 4027 4028 host = mmc_priv(mmc); 4029 host->mmc = mmc; 4030 host->mmc_host_ops = sdhci_ops; 4031 mmc->ops = &host->mmc_host_ops; 4032 4033 host->flags = SDHCI_SIGNALING_330; 4034 4035 host->cqe_ier = SDHCI_CQE_INT_MASK; 4036 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK; 4037 4038 host->tuning_delay = -1; 4039 host->tuning_loop_count = MAX_TUNING_LOOP; 4040 4041 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; 4042 4043 /* 4044 * The DMA table descriptor count is calculated as the maximum 4045 * number of segments times 2, to allow for an alignment 4046 * descriptor for each segment, plus 1 for a nop end descriptor. 4047 */ 4048 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; 4049 host->max_adma = 65536; 4050 4051 host->max_timeout_count = 0xE; 4052 4053 return host; 4054 } 4055 4056 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 4057 4058 static int sdhci_set_dma_mask(struct sdhci_host *host) 4059 { 4060 struct mmc_host *mmc = host->mmc; 4061 struct device *dev = mmc_dev(mmc); 4062 int ret = -EINVAL; 4063 4064 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 4065 host->flags &= ~SDHCI_USE_64_BIT_DMA; 4066 4067 /* Try 64-bit mask if hardware is capable of it */ 4068 if (host->flags & SDHCI_USE_64_BIT_DMA) { 4069 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 4070 if (ret) { 4071 pr_warn("%s: Failed to set 64-bit DMA mask.\n", 4072 mmc_hostname(mmc)); 4073 host->flags &= ~SDHCI_USE_64_BIT_DMA; 4074 } 4075 } 4076 4077 /* 32-bit mask as default & fallback */ 4078 if (ret) { 4079 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 4080 if (ret) 4081 pr_warn("%s: Failed to set 32-bit DMA mask.\n", 4082 mmc_hostname(mmc)); 4083 } 4084 4085 return ret; 4086 } 4087 4088 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver, 4089 const u32 *caps, const u32 *caps1) 4090 { 4091 u16 v; 4092 u64 dt_caps_mask = 0; 4093 u64 dt_caps = 0; 4094 4095 if (host->read_caps) 4096 return; 4097 4098 host->read_caps = true; 4099 4100 if (debug_quirks) 4101 host->quirks = debug_quirks; 4102 4103 if (debug_quirks2) 4104 host->quirks2 = debug_quirks2; 4105 4106 sdhci_reset_for_all(host); 4107 4108 if (host->v4_mode) 4109 sdhci_do_enable_v4_mode(host); 4110 4111 device_property_read_u64(mmc_dev(host->mmc), 4112 "sdhci-caps-mask", &dt_caps_mask); 4113 device_property_read_u64(mmc_dev(host->mmc), 4114 "sdhci-caps", &dt_caps); 4115 4116 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION); 4117 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; 4118 4119 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS) 4120 return; 4121 4122 if (caps) { 4123 host->caps = *caps; 4124 } else { 4125 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); 4126 host->caps &= ~lower_32_bits(dt_caps_mask); 4127 host->caps |= lower_32_bits(dt_caps); 4128 } 4129 4130 if (host->version < SDHCI_SPEC_300) 4131 return; 4132 4133 if (caps1) { 4134 host->caps1 = *caps1; 4135 } else { 4136 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); 4137 host->caps1 &= ~upper_32_bits(dt_caps_mask); 4138 host->caps1 |= upper_32_bits(dt_caps); 4139 } 4140 } 4141 EXPORT_SYMBOL_GPL(__sdhci_read_caps); 4142 4143 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host) 4144 { 4145 struct mmc_host *mmc = host->mmc; 4146 unsigned int max_blocks; 4147 unsigned int bounce_size; 4148 int ret; 4149 4150 /* 4151 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer 4152 * has diminishing returns, this is probably because SD/MMC 4153 * cards are usually optimized to handle this size of requests. 4154 */ 4155 bounce_size = SZ_64K; 4156 /* 4157 * Adjust downwards to maximum request size if this is less 4158 * than our segment size, else hammer down the maximum 4159 * request size to the maximum buffer size. 4160 */ 4161 if (mmc->max_req_size < bounce_size) 4162 bounce_size = mmc->max_req_size; 4163 max_blocks = bounce_size / 512; 4164 4165 /* 4166 * When we just support one segment, we can get significant 4167 * speedups by the help of a bounce buffer to group scattered 4168 * reads/writes together. 4169 */ 4170 host->bounce_buffer = devm_kmalloc(mmc_dev(mmc), 4171 bounce_size, 4172 GFP_KERNEL); 4173 if (!host->bounce_buffer) { 4174 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", 4175 mmc_hostname(mmc), 4176 bounce_size); 4177 /* 4178 * Exiting with zero here makes sure we proceed with 4179 * mmc->max_segs == 1. 4180 */ 4181 return; 4182 } 4183 4184 host->bounce_addr = dma_map_single(mmc_dev(mmc), 4185 host->bounce_buffer, 4186 bounce_size, 4187 DMA_BIDIRECTIONAL); 4188 ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr); 4189 if (ret) { 4190 devm_kfree(mmc_dev(mmc), host->bounce_buffer); 4191 host->bounce_buffer = NULL; 4192 /* Again fall back to max_segs == 1 */ 4193 return; 4194 } 4195 4196 host->bounce_buffer_size = bounce_size; 4197 4198 /* Lie about this since we're bouncing */ 4199 mmc->max_segs = max_blocks; 4200 mmc->max_seg_size = bounce_size; 4201 mmc->max_req_size = bounce_size; 4202 4203 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n", 4204 mmc_hostname(mmc), max_blocks, bounce_size); 4205 } 4206 4207 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host) 4208 { 4209 /* 4210 * According to SD Host Controller spec v4.10, bit[27] added from 4211 * version 4.10 in Capabilities Register is used as 64-bit System 4212 * Address support for V4 mode. 4213 */ 4214 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) 4215 return host->caps & SDHCI_CAN_64BIT_V4; 4216 4217 return host->caps & SDHCI_CAN_64BIT; 4218 } 4219 4220 int sdhci_setup_host(struct sdhci_host *host) 4221 { 4222 struct mmc_host *mmc; 4223 u32 max_current_caps; 4224 unsigned int ocr_avail; 4225 unsigned int override_timeout_clk; 4226 u32 max_clk; 4227 int ret = 0; 4228 bool enable_vqmmc = false; 4229 4230 WARN_ON(host == NULL); 4231 if (host == NULL) 4232 return -EINVAL; 4233 4234 mmc = host->mmc; 4235 4236 /* 4237 * If there are external regulators, get them. Note this must be done 4238 * early before resetting the host and reading the capabilities so that 4239 * the host can take the appropriate action if regulators are not 4240 * available. 4241 */ 4242 if (!mmc->supply.vqmmc) { 4243 ret = mmc_regulator_get_supply(mmc); 4244 if (ret) 4245 return ret; 4246 enable_vqmmc = true; 4247 } 4248 4249 DBG("Version: 0x%08x | Present: 0x%08x\n", 4250 sdhci_readw(host, SDHCI_HOST_VERSION), 4251 sdhci_readl(host, SDHCI_PRESENT_STATE)); 4252 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", 4253 sdhci_readl(host, SDHCI_CAPABILITIES), 4254 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 4255 4256 sdhci_read_caps(host); 4257 4258 override_timeout_clk = host->timeout_clk; 4259 4260 if (host->version > SDHCI_SPEC_420) { 4261 pr_err("%s: Unknown controller version (%d). You may experience problems.\n", 4262 mmc_hostname(mmc), host->version); 4263 } 4264 4265 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 4266 host->flags |= SDHCI_USE_SDMA; 4267 else if (!(host->caps & SDHCI_CAN_DO_SDMA)) 4268 DBG("Controller doesn't have SDMA capability\n"); 4269 else 4270 host->flags |= SDHCI_USE_SDMA; 4271 4272 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 4273 (host->flags & SDHCI_USE_SDMA)) { 4274 DBG("Disabling DMA as it is marked broken\n"); 4275 host->flags &= ~SDHCI_USE_SDMA; 4276 } 4277 4278 if ((host->version >= SDHCI_SPEC_200) && 4279 (host->caps & SDHCI_CAN_DO_ADMA2)) 4280 host->flags |= SDHCI_USE_ADMA; 4281 4282 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 4283 (host->flags & SDHCI_USE_ADMA)) { 4284 DBG("Disabling ADMA as it is marked broken\n"); 4285 host->flags &= ~SDHCI_USE_ADMA; 4286 } 4287 4288 if (sdhci_can_64bit_dma(host)) 4289 host->flags |= SDHCI_USE_64_BIT_DMA; 4290 4291 if (host->use_external_dma) { 4292 ret = sdhci_external_dma_init(host); 4293 if (ret == -EPROBE_DEFER) 4294 goto unreg; 4295 /* 4296 * Fall back to use the DMA/PIO integrated in standard SDHCI 4297 * instead of external DMA devices. 4298 */ 4299 else if (ret) 4300 sdhci_switch_external_dma(host, false); 4301 /* Disable internal DMA sources */ 4302 else 4303 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 4304 } 4305 4306 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 4307 if (host->ops->set_dma_mask) 4308 ret = host->ops->set_dma_mask(host); 4309 else 4310 ret = sdhci_set_dma_mask(host); 4311 4312 if (!ret && host->ops->enable_dma) 4313 ret = host->ops->enable_dma(host); 4314 4315 if (ret) { 4316 pr_warn("%s: No suitable DMA available - falling back to PIO\n", 4317 mmc_hostname(mmc)); 4318 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 4319 4320 ret = 0; 4321 } 4322 } 4323 4324 /* SDMA does not support 64-bit DMA if v4 mode not set */ 4325 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode) 4326 host->flags &= ~SDHCI_USE_SDMA; 4327 4328 if (host->flags & SDHCI_USE_ADMA) { 4329 dma_addr_t dma; 4330 void *buf; 4331 4332 if (!(host->flags & SDHCI_USE_64_BIT_DMA)) 4333 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ; 4334 else if (!host->alloc_desc_sz) 4335 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); 4336 4337 host->desc_sz = host->alloc_desc_sz; 4338 host->adma_table_sz = host->adma_table_cnt * host->desc_sz; 4339 4340 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; 4341 /* 4342 * Use zalloc to zero the reserved high 32-bits of 128-bit 4343 * descriptors so that they never need to be written. 4344 */ 4345 buf = dma_alloc_coherent(mmc_dev(mmc), 4346 host->align_buffer_sz + host->adma_table_sz, 4347 &dma, GFP_KERNEL); 4348 if (!buf) { 4349 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 4350 mmc_hostname(mmc)); 4351 host->flags &= ~SDHCI_USE_ADMA; 4352 } else if ((dma + host->align_buffer_sz) & 4353 (SDHCI_ADMA2_DESC_ALIGN - 1)) { 4354 pr_warn("%s: unable to allocate aligned ADMA descriptor\n", 4355 mmc_hostname(mmc)); 4356 host->flags &= ~SDHCI_USE_ADMA; 4357 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4358 host->adma_table_sz, buf, dma); 4359 } else { 4360 host->align_buffer = buf; 4361 host->align_addr = dma; 4362 4363 host->adma_table = buf + host->align_buffer_sz; 4364 host->adma_addr = dma + host->align_buffer_sz; 4365 } 4366 } 4367 4368 /* 4369 * If we use DMA, then it's up to the caller to set the DMA 4370 * mask, but PIO does not need the hw shim so we set a new 4371 * mask here in that case. 4372 */ 4373 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { 4374 host->dma_mask = DMA_BIT_MASK(64); 4375 mmc_dev(mmc)->dma_mask = &host->dma_mask; 4376 } 4377 4378 if (host->version >= SDHCI_SPEC_300) 4379 host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps); 4380 else 4381 host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps); 4382 4383 host->max_clk *= 1000000; 4384 if (host->max_clk == 0 || host->quirks & 4385 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 4386 if (!host->ops->get_max_clock) { 4387 pr_err("%s: Hardware doesn't specify base clock frequency.\n", 4388 mmc_hostname(mmc)); 4389 ret = -ENODEV; 4390 goto undma; 4391 } 4392 host->max_clk = host->ops->get_max_clock(host); 4393 } 4394 4395 /* 4396 * In case of Host Controller v3.00, find out whether clock 4397 * multiplier is supported. 4398 */ 4399 host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1); 4400 4401 /* 4402 * In case the value in Clock Multiplier is 0, then programmable 4403 * clock mode is not supported, otherwise the actual clock 4404 * multiplier is one more than the value of Clock Multiplier 4405 * in the Capabilities Register. 4406 */ 4407 if (host->clk_mul) 4408 host->clk_mul += 1; 4409 4410 /* 4411 * Set host parameters. 4412 */ 4413 max_clk = host->max_clk; 4414 4415 if (host->ops->get_min_clock) 4416 mmc->f_min = host->ops->get_min_clock(host); 4417 else if (host->version >= SDHCI_SPEC_300) { 4418 if (host->clk_mul) 4419 max_clk = host->max_clk * host->clk_mul; 4420 /* 4421 * Divided Clock Mode minimum clock rate is always less than 4422 * Programmable Clock Mode minimum clock rate. 4423 */ 4424 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 4425 } else 4426 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 4427 4428 if (!mmc->f_max || mmc->f_max > max_clk) 4429 mmc->f_max = max_clk; 4430 4431 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 4432 host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps); 4433 4434 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) 4435 host->timeout_clk *= 1000; 4436 4437 if (host->timeout_clk == 0) { 4438 if (!host->ops->get_timeout_clock) { 4439 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n", 4440 mmc_hostname(mmc)); 4441 ret = -ENODEV; 4442 goto undma; 4443 } 4444 4445 host->timeout_clk = 4446 DIV_ROUND_UP(host->ops->get_timeout_clock(host), 4447 1000); 4448 } 4449 4450 if (override_timeout_clk) 4451 host->timeout_clk = override_timeout_clk; 4452 4453 mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 4454 host->ops->get_max_timeout_count(host) : 1 << 27; 4455 mmc->max_busy_timeout /= host->timeout_clk; 4456 } 4457 4458 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT && 4459 !host->ops->get_max_timeout_count) 4460 mmc->max_busy_timeout = 0; 4461 4462 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23; 4463 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 4464 4465 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 4466 host->flags |= SDHCI_AUTO_CMD12; 4467 4468 /* 4469 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO. 4470 * For v4 mode, SDMA may use Auto-CMD23 as well. 4471 */ 4472 if ((host->version >= SDHCI_SPEC_300) && 4473 ((host->flags & SDHCI_USE_ADMA) || 4474 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) && 4475 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { 4476 host->flags |= SDHCI_AUTO_CMD23; 4477 DBG("Auto-CMD23 available\n"); 4478 } else { 4479 DBG("Auto-CMD23 unavailable\n"); 4480 } 4481 4482 /* 4483 * A controller may support 8-bit width, but the board itself 4484 * might not have the pins brought out. Boards that support 4485 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 4486 * their platform code before calling sdhci_add_host(), and we 4487 * won't assume 8-bit width for hosts without that CAP. 4488 */ 4489 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 4490 mmc->caps |= MMC_CAP_4_BIT_DATA; 4491 4492 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) 4493 mmc->caps &= ~MMC_CAP_CMD23; 4494 4495 if (host->caps & SDHCI_CAN_DO_HISPD) 4496 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 4497 4498 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 4499 mmc_card_is_removable(mmc) && 4500 mmc_gpio_get_cd(mmc) < 0) 4501 mmc->caps |= MMC_CAP_NEEDS_POLL; 4502 4503 if (!IS_ERR(mmc->supply.vqmmc)) { 4504 if (enable_vqmmc) { 4505 ret = regulator_enable(mmc->supply.vqmmc); 4506 host->sdhci_core_to_disable_vqmmc = !ret; 4507 } 4508 4509 /* If vqmmc provides no 1.8V signalling, then there's no UHS */ 4510 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, 4511 1950000)) 4512 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | 4513 SDHCI_SUPPORT_SDR50 | 4514 SDHCI_SUPPORT_DDR50); 4515 4516 /* In eMMC case vqmmc might be a fixed 1.8V regulator */ 4517 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000, 4518 3600000)) 4519 host->flags &= ~SDHCI_SIGNALING_330; 4520 4521 if (ret) { 4522 pr_warn("%s: Failed to enable vqmmc regulator: %d\n", 4523 mmc_hostname(mmc), ret); 4524 mmc->supply.vqmmc = ERR_PTR(-EINVAL); 4525 } 4526 4527 } 4528 4529 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) { 4530 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 4531 SDHCI_SUPPORT_DDR50); 4532 /* 4533 * The SDHCI controller in a SoC might support HS200/HS400 4534 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property), 4535 * but if the board is modeled such that the IO lines are not 4536 * connected to 1.8v then HS200/HS400 cannot be supported. 4537 * Disable HS200/HS400 if the board does not have 1.8v connected 4538 * to the IO lines. (Applicable for other modes in 1.8v) 4539 */ 4540 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES); 4541 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS); 4542 } 4543 4544 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ 4545 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 4546 SDHCI_SUPPORT_DDR50)) 4547 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; 4548 4549 /* SDR104 supports also implies SDR50 support */ 4550 if (host->caps1 & SDHCI_SUPPORT_SDR104) { 4551 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; 4552 /* SD3.0: SDR104 is supported so (for eMMC) the caps2 4553 * field can be promoted to support HS200. 4554 */ 4555 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) 4556 mmc->caps2 |= MMC_CAP2_HS200; 4557 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) { 4558 mmc->caps |= MMC_CAP_UHS_SDR50; 4559 } 4560 4561 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 && 4562 (host->caps1 & SDHCI_SUPPORT_HS400)) 4563 mmc->caps2 |= MMC_CAP2_HS400; 4564 4565 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) && 4566 (IS_ERR(mmc->supply.vqmmc) || 4567 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000, 4568 1300000))) 4569 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V; 4570 4571 if ((host->caps1 & SDHCI_SUPPORT_DDR50) && 4572 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) 4573 mmc->caps |= MMC_CAP_UHS_DDR50; 4574 4575 /* Does the host need tuning for SDR50? */ 4576 if (host->caps1 & SDHCI_USE_SDR50_TUNING) 4577 host->flags |= SDHCI_SDR50_NEEDS_TUNING; 4578 4579 /* Driver Type(s) (A, C, D) supported by the host */ 4580 if (host->caps1 & SDHCI_DRIVER_TYPE_A) 4581 mmc->caps |= MMC_CAP_DRIVER_TYPE_A; 4582 if (host->caps1 & SDHCI_DRIVER_TYPE_C) 4583 mmc->caps |= MMC_CAP_DRIVER_TYPE_C; 4584 if (host->caps1 & SDHCI_DRIVER_TYPE_D) 4585 mmc->caps |= MMC_CAP_DRIVER_TYPE_D; 4586 4587 /* Initial value for re-tuning timer count */ 4588 host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK, 4589 host->caps1); 4590 4591 /* 4592 * In case Re-tuning Timer is not disabled, the actual value of 4593 * re-tuning timer will be 2 ^ (n - 1). 4594 */ 4595 if (host->tuning_count) 4596 host->tuning_count = 1 << (host->tuning_count - 1); 4597 4598 /* Re-tuning mode supported by the Host Controller */ 4599 host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1); 4600 4601 ocr_avail = 0; 4602 4603 /* 4604 * According to SD Host Controller spec v3.00, if the Host System 4605 * can afford more than 150mA, Host Driver should set XPC to 1. Also 4606 * the value is meaningful only if Voltage Support in the Capabilities 4607 * register is set. The actual current value is 4 times the register 4608 * value. 4609 */ 4610 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); 4611 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) { 4612 int curr = regulator_get_current_limit(mmc->supply.vmmc); 4613 if (curr > 0) { 4614 4615 /* convert to SDHCI_MAX_CURRENT format */ 4616 curr = curr/1000; /* convert to mA */ 4617 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; 4618 4619 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); 4620 max_current_caps = 4621 FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) | 4622 FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) | 4623 FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr); 4624 } 4625 } 4626 4627 if (host->caps & SDHCI_CAN_VDD_330) { 4628 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; 4629 4630 mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK, 4631 max_current_caps) * 4632 SDHCI_MAX_CURRENT_MULTIPLIER; 4633 } 4634 if (host->caps & SDHCI_CAN_VDD_300) { 4635 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; 4636 4637 mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK, 4638 max_current_caps) * 4639 SDHCI_MAX_CURRENT_MULTIPLIER; 4640 } 4641 if (host->caps & SDHCI_CAN_VDD_180) { 4642 ocr_avail |= MMC_VDD_165_195; 4643 4644 mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK, 4645 max_current_caps) * 4646 SDHCI_MAX_CURRENT_MULTIPLIER; 4647 } 4648 4649 /* If OCR set by host, use it instead. */ 4650 if (host->ocr_mask) 4651 ocr_avail = host->ocr_mask; 4652 4653 /* If OCR set by external regulators, give it highest prio. */ 4654 if (mmc->ocr_avail) 4655 ocr_avail = mmc->ocr_avail; 4656 4657 mmc->ocr_avail = ocr_avail; 4658 mmc->ocr_avail_sdio = ocr_avail; 4659 if (host->ocr_avail_sdio) 4660 mmc->ocr_avail_sdio &= host->ocr_avail_sdio; 4661 mmc->ocr_avail_sd = ocr_avail; 4662 if (host->ocr_avail_sd) 4663 mmc->ocr_avail_sd &= host->ocr_avail_sd; 4664 else /* normal SD controllers don't support 1.8V */ 4665 mmc->ocr_avail_sd &= ~MMC_VDD_165_195; 4666 mmc->ocr_avail_mmc = ocr_avail; 4667 if (host->ocr_avail_mmc) 4668 mmc->ocr_avail_mmc &= host->ocr_avail_mmc; 4669 4670 if (mmc->ocr_avail == 0) { 4671 pr_err("%s: Hardware doesn't report any support voltages.\n", 4672 mmc_hostname(mmc)); 4673 ret = -ENODEV; 4674 goto unreg; 4675 } 4676 4677 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 4678 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | 4679 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) || 4680 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V))) 4681 host->flags |= SDHCI_SIGNALING_180; 4682 4683 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V) 4684 host->flags |= SDHCI_SIGNALING_120; 4685 4686 spin_lock_init(&host->lock); 4687 4688 /* 4689 * Maximum number of sectors in one transfer. Limited by SDMA boundary 4690 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this 4691 * is less anyway. 4692 */ 4693 mmc->max_req_size = 524288; 4694 4695 /* 4696 * Maximum number of segments. Depends on if the hardware 4697 * can do scatter/gather or not. 4698 */ 4699 if (host->flags & SDHCI_USE_ADMA) { 4700 mmc->max_segs = SDHCI_MAX_SEGS; 4701 } else if (host->flags & SDHCI_USE_SDMA) { 4702 mmc->max_segs = 1; 4703 mmc->max_req_size = min_t(size_t, mmc->max_req_size, 4704 dma_max_mapping_size(mmc_dev(mmc))); 4705 } else { /* PIO */ 4706 mmc->max_segs = SDHCI_MAX_SEGS; 4707 } 4708 4709 /* 4710 * Maximum segment size. Could be one segment with the maximum number 4711 * of bytes. When doing hardware scatter/gather, each entry cannot 4712 * be larger than 64 KiB though. 4713 */ 4714 if (host->flags & SDHCI_USE_ADMA) { 4715 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) { 4716 host->max_adma = 65532; /* 32-bit alignment */ 4717 mmc->max_seg_size = 65535; 4718 } else { 4719 mmc->max_seg_size = 65536; 4720 } 4721 } else { 4722 mmc->max_seg_size = mmc->max_req_size; 4723 } 4724 4725 /* 4726 * Maximum block size. This varies from controller to controller and 4727 * is specified in the capabilities register. 4728 */ 4729 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { 4730 mmc->max_blk_size = 2; 4731 } else { 4732 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >> 4733 SDHCI_MAX_BLOCK_SHIFT; 4734 if (mmc->max_blk_size >= 3) { 4735 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n", 4736 mmc_hostname(mmc)); 4737 mmc->max_blk_size = 0; 4738 } 4739 } 4740 4741 mmc->max_blk_size = 512 << mmc->max_blk_size; 4742 4743 /* 4744 * Maximum block count. 4745 */ 4746 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; 4747 4748 if (mmc->max_segs == 1) 4749 /* This may alter mmc->*_blk_* parameters */ 4750 sdhci_allocate_bounce_buffer(host); 4751 4752 return 0; 4753 4754 unreg: 4755 if (host->sdhci_core_to_disable_vqmmc) 4756 regulator_disable(mmc->supply.vqmmc); 4757 undma: 4758 if (host->align_buffer) 4759 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4760 host->adma_table_sz, host->align_buffer, 4761 host->align_addr); 4762 host->adma_table = NULL; 4763 host->align_buffer = NULL; 4764 4765 return ret; 4766 } 4767 EXPORT_SYMBOL_GPL(sdhci_setup_host); 4768 4769 void sdhci_cleanup_host(struct sdhci_host *host) 4770 { 4771 struct mmc_host *mmc = host->mmc; 4772 4773 if (host->sdhci_core_to_disable_vqmmc) 4774 regulator_disable(mmc->supply.vqmmc); 4775 4776 if (host->align_buffer) 4777 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4778 host->adma_table_sz, host->align_buffer, 4779 host->align_addr); 4780 4781 if (host->use_external_dma) 4782 sdhci_external_dma_release(host); 4783 4784 host->adma_table = NULL; 4785 host->align_buffer = NULL; 4786 } 4787 EXPORT_SYMBOL_GPL(sdhci_cleanup_host); 4788 4789 int __sdhci_add_host(struct sdhci_host *host) 4790 { 4791 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI; 4792 struct mmc_host *mmc = host->mmc; 4793 int ret; 4794 4795 if ((mmc->caps2 & MMC_CAP2_CQE) && 4796 (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) { 4797 mmc->caps2 &= ~MMC_CAP2_CQE; 4798 mmc->cqe_ops = NULL; 4799 } 4800 4801 host->complete_wq = alloc_workqueue("sdhci", flags, 0); 4802 if (!host->complete_wq) 4803 return -ENOMEM; 4804 4805 INIT_WORK(&host->complete_work, sdhci_complete_work); 4806 4807 timer_setup(&host->timer, sdhci_timeout_timer, 0); 4808 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0); 4809 4810 init_waitqueue_head(&host->buf_ready_int); 4811 4812 sdhci_init(host, 0); 4813 4814 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, 4815 IRQF_SHARED, mmc_hostname(mmc), host); 4816 if (ret) { 4817 pr_err("%s: Failed to request IRQ %d: %d\n", 4818 mmc_hostname(mmc), host->irq, ret); 4819 goto unwq; 4820 } 4821 4822 ret = sdhci_led_register(host); 4823 if (ret) { 4824 pr_err("%s: Failed to register LED device: %d\n", 4825 mmc_hostname(mmc), ret); 4826 goto unirq; 4827 } 4828 4829 ret = mmc_add_host(mmc); 4830 if (ret) 4831 goto unled; 4832 4833 pr_info("%s: SDHCI controller on %s [%s] using %s\n", 4834 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), 4835 host->use_external_dma ? "External DMA" : 4836 (host->flags & SDHCI_USE_ADMA) ? 4837 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : 4838 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"); 4839 4840 sdhci_enable_card_detection(host); 4841 4842 return 0; 4843 4844 unled: 4845 sdhci_led_unregister(host); 4846 unirq: 4847 sdhci_reset_for_all(host); 4848 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4849 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4850 free_irq(host->irq, host); 4851 unwq: 4852 destroy_workqueue(host->complete_wq); 4853 4854 return ret; 4855 } 4856 EXPORT_SYMBOL_GPL(__sdhci_add_host); 4857 4858 int sdhci_add_host(struct sdhci_host *host) 4859 { 4860 int ret; 4861 4862 ret = sdhci_setup_host(host); 4863 if (ret) 4864 return ret; 4865 4866 ret = __sdhci_add_host(host); 4867 if (ret) 4868 goto cleanup; 4869 4870 return 0; 4871 4872 cleanup: 4873 sdhci_cleanup_host(host); 4874 4875 return ret; 4876 } 4877 EXPORT_SYMBOL_GPL(sdhci_add_host); 4878 4879 void sdhci_remove_host(struct sdhci_host *host, int dead) 4880 { 4881 struct mmc_host *mmc = host->mmc; 4882 unsigned long flags; 4883 4884 if (dead) { 4885 spin_lock_irqsave(&host->lock, flags); 4886 4887 host->flags |= SDHCI_DEVICE_DEAD; 4888 4889 if (sdhci_has_requests(host)) { 4890 pr_err("%s: Controller removed during " 4891 " transfer!\n", mmc_hostname(mmc)); 4892 sdhci_error_out_mrqs(host, -ENOMEDIUM); 4893 } 4894 4895 spin_unlock_irqrestore(&host->lock, flags); 4896 } 4897 4898 sdhci_disable_card_detection(host); 4899 4900 mmc_remove_host(mmc); 4901 4902 sdhci_led_unregister(host); 4903 4904 if (!dead) 4905 sdhci_reset_for_all(host); 4906 4907 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4908 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4909 free_irq(host->irq, host); 4910 4911 del_timer_sync(&host->timer); 4912 del_timer_sync(&host->data_timer); 4913 4914 destroy_workqueue(host->complete_wq); 4915 4916 if (host->sdhci_core_to_disable_vqmmc) 4917 regulator_disable(mmc->supply.vqmmc); 4918 4919 if (host->align_buffer) 4920 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4921 host->adma_table_sz, host->align_buffer, 4922 host->align_addr); 4923 4924 if (host->use_external_dma) 4925 sdhci_external_dma_release(host); 4926 4927 host->adma_table = NULL; 4928 host->align_buffer = NULL; 4929 } 4930 4931 EXPORT_SYMBOL_GPL(sdhci_remove_host); 4932 4933 void sdhci_free_host(struct sdhci_host *host) 4934 { 4935 mmc_free_host(host->mmc); 4936 } 4937 4938 EXPORT_SYMBOL_GPL(sdhci_free_host); 4939 4940 /*****************************************************************************\ 4941 * * 4942 * Driver init/exit * 4943 * * 4944 \*****************************************************************************/ 4945 4946 static int __init sdhci_drv_init(void) 4947 { 4948 pr_info(DRIVER_NAME 4949 ": Secure Digital Host Controller Interface driver\n"); 4950 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 4951 4952 return 0; 4953 } 4954 4955 static void __exit sdhci_drv_exit(void) 4956 { 4957 } 4958 4959 module_init(sdhci_drv_init); 4960 module_exit(sdhci_drv_exit); 4961 4962 module_param(debug_quirks, uint, 0444); 4963 module_param(debug_quirks2, uint, 0444); 4964 4965 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 4966 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 4967 MODULE_LICENSE("GPL"); 4968 4969 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 4970 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 4971