// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * Thanks to the following companies for their support:
 *
 *   - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
	u16 ctrl2;

	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ctrl2 & SDHCI_CTRL_V4_MODE)
		return;

	ctrl2 |= SDHCI_CTRL_V4_MODE;
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}

/*
 * This can be called before sdhci_add_host() by a vendor's host controller
 * driver to enable v4 mode if supported.
 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
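
/*
 * Illustrative sketch (hypothetical, not part of this driver): a vendor
 * driver would typically call sdhci_enable_v4_mode() from its probe
 * routine, before sdhci_add_host(), e.g.:
 *
 *	if (vendor_controller_supports_v4(host))	// hypothetical helper
 *		sdhci_enable_v4_mode(host);
 *	ret = sdhci_add_host(host);
 */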

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
			       mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many settings */
		host->preset_enabled = false;
	}
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_config_dma(struct sdhci_host *host)
{
	u8 ctrl;
	u16 ctrl2;

	if (host->version < SDHCI_SPEC_200)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (!(host->flags & SDHCI_REQ_USE_DMA))
		goto out;

	/* Note if DMA Select is zero then SDMA is selected */
	if (host->flags & SDHCI_USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;

	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		/*
		 * If v4 mode, all supported DMA can be 64-bit addressing if
		 * controller supports 64-bit system address, otherwise only
		 * ADMA can support 64-bit addressing.
		 */
		if (host->v4_mode) {
			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
		} else if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
			 * set SDHCI_CTRL_ADMA64.
			 */
			ctrl |= SDHCI_CTRL_ADMA64;
		}
	}

out:
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);

	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);

	/*
	 * A change to the card detect bits indicates a change in present
	 * state, refer to sdhci_set_card_detection(). A card detect interrupt
	 * might have been missed while the host controller was being reset,
	 * so trigger a rescan to check.
	 */
	if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return 0;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
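
/*
 * Worked example for the unpacking above: a 32-bit read of SDHCI_BUFFER
 * returning scratch == 0xddccbbaa is consumed least-significant byte
 * first, so the buffer receives 0xaa, 0xbb, 0xcc, 0xdd in that order.
 */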

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buffer, length);
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(host->mmc->parent,
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
			   dma_addr_t addr, int len, unsigned int cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = *desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));

	*desc += host->desc_sz;
}
EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);

static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
					   void **desc, dma_addr_t addr,
					   int len, unsigned int cmd)
{
	if (host->ops->adma_write_desc)
		host->ops->adma_write_desc(host, desc, addr, len, cmd);
	else
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			__sdhci_adma_write_desc(host, &desc, align_addr,
						offset, ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* tran, valid */
		if (len)
			__sdhci_adma_write_desc(host, &desc, addr, len,
						ADMA2_TRAN_VALID);

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}
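
/*
 * Worked example for the alignment fix-up above, assuming the usual
 * SDHCI_ADMA2_ALIGN == 4 and SDHCI_ADMA2_MASK == 3: a DMA address ending
 * in 0x2 gives offset = (4 - 2) & 3 = 2, so one TRAN descriptor covers
 * 2 bytes from the 4-byte-aligned bounce buffer and the main TRAN
 * descriptor starts at addr + 2 with len - 2 bytes.
 */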

static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
}

static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	if (host->v4_mode)
		sdhci_set_adma_addr(host, addr);
	else
		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
}

static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}
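
/*
 * Example of the conversion above: with data->timeout_ns = 100000000
 * (100 ms) and data->timeout_clks = 1000 at host->clock = 25 MHz, the
 * clock term adds 1000000 * 1000 / 25000000 = 40 us, giving a target
 * timeout of 100040 us.
 */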

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = host->mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data;
	unsigned target_timeout, current_timeout;

	*too_big = true;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified command, assume max */
	if (cmd == NULL)
		return 0xE;

	data = cmd->data;
	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 * =>
	 * (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
			DBG("Too large timeout 0x%x requested for CMD%d!\n",
			    count, cmd->opcode);
		count = 0xE;
	} else {
		*too_big = false;
	}

	return count;
}
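
/*
 * Example of the count calculation above: with host->timeout_clk = 1000
 * (kHz), the minimum timeout is (1 << 13) * 1000 / 1000 = 8192 us. A
 * target of 100000 us needs four doublings (8192 << 4 = 131072 >= 100000),
 * so count = 4, i.e. a hardware timeout of 2^(13 + 4) clock cycles.
 */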

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
	else
		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		bool too_big = false;

		count = sdhci_calc_timeout(host, cmd, &too_big);

		if (too_big &&
		    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
			sdhci_calc_sw_timeout(host, cmd);
			sdhci_set_data_timeout_irq(host, false);
		} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
			sdhci_set_data_timeout_irq(host, true);
		}

		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	host->data_timeout = 0;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);
			sdhci_set_adma_addr(host, host->adma_addr);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
		}
	}

	sdhci_config_dma(host);

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);

	/*
	 * For Version 4.10 onwards, if v4 mode is enabled, a 32-bit Block
	 * Count can be supported; in that case the 16-bit block count
	 * register must be 0.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
	} else {
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
	}
}

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 u16 *mode)
{
	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
			 (cmd->opcode != SD_IO_RW_EXTENDED);
	bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
	u16 ctrl2;

	/*
	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
	 * Select' is recommended rather than use of 'Auto CMD12
	 * Enable' or 'Auto CMD23 Enable'.
	 */
	if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
		*mode |= SDHCI_TRNS_AUTO_SEL;

		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (use_cmd23)
			ctrl2 |= SDHCI_CMD23_ENABLE;
		else
			ctrl2 &= ~SDHCI_CMD23_ENABLE;
		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);

		return;
	}

	/*
	 * If we are sending CMD23, CMD12 never gets sent
	 * on successful completion (so no Auto-CMD12).
	 */
	if (use_cmd12)
		*mode |= SDHCI_TRNS_AUTO_CMD12;
	else if (use_cmd23)
		*mode |= SDHCI_TRNS_AUTO_CMD23;
}
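
/*
 * To illustrate the selection above: on a v4.10+ controller, a pre-defined
 * transfer (mrq->sbc set with SDHCI_AUTO_CMD23) uses 'Auto CMD Auto Select'
 * with CMD23 enabled in HOST_CONTROL2, while an open-ended transfer that
 * qualifies for Auto-CMD12 uses 'Auto CMD Auto Select' with CMD23 disabled.
 * Older controllers fall back to the explicit SDHCI_TRNS_AUTO_CMD12 /
 * SDHCI_TRNS_AUTO_CMD23 transfer mode bits.
 */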

static void sdhci_set_transfer_mode(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
		    SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			/* must not clear SDHCI_TRANSFER_MODE when tuning */
			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				     SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		sdhci_auto_cmd_select(host, cmd, &mode);
		if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	sdhci_del_timer(host, mrq);

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	__sdhci_finish_mrq(host, mrq);

	queue_work(host->complete_wq, &host->complete_work);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	/*
	 * The controller needs a reset of internal state machines upon error
	 * conditions.
	 */
	if (data->error) {
		if (!host->cmd || host->cmd == data_cmd)
			sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {
		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			__sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		__sdhci_finish_mrq(host, data->mrq);
	}
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
		       mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}
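
/*
 * Worked example for the shifting above: the controller strips the R2 CRC
 * byte, so each register word holds response bits shifted right by 8.
 * Shifting each word left by 8 and pulling in the top byte of the following
 * word (cmd->resp[i + 1] >> 24) reassembles the contiguous 128-bit response;
 * the bottom byte of resp[3] is left as zero.
 */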

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			__sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
			    (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
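
/*
 * Divider example for sdhci_calc_clk(): with a 200 MHz base clock and a
 * 50 MHz request on a v3.00 host (divisors are multiples of 2), the search
 * stops at div = 4, so actual_clock = 200 MHz / 4 = 50 MHz and the value
 * programmed into the divider field is div >> 1 = 2. A v2.00 host's
 * power-of-2 search arrives at the same div = 4.
 */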

void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 150 ms */
	timeout = ktime_add_ms(ktime_get(), 150);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		if (clk & SDHCI_CLOCK_INT_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
		clk |= SDHCI_CLOCK_PLL_EN;
		clk &= ~SDHCI_CLOCK_INT_STABLE;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		/* Wait max 150 ms */
		timeout = ktime_add_ms(ktime_get(), 150);
		while (1) {
			bool timedout = ktime_after(ktime_get(), timeout);

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			if (clk & SDHCI_CLOCK_INT_STABLE)
				break;
			if (timedout) {
				pr_err("%s: PLL clock never stabilised.\n",
				       mmc_hostname(host->mmc));
				sdhci_dumpregs(host);
				return;
			}
			udelay(10);
		}
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}
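
/*
 * Note on the vdd encoding used below: vdd is a bit index into the OCR
 * bitmask, so '1 << vdd' recovers the MMC_VDD_* range bit, e.g. vdd == 17
 * selects MMC_VDD_29_30 and vdd == 21 selects MMC_VDD_33_34.
 */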

void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			/*
			 * Without a regulator, SDHCI does not support 2.0v
			 * so we only get here if the driver deliberately
			 * added the 2.0v range to ocr_avail. Map it to 1.8v
			 * for the purpose of turning on the power.
			 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on the power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before they
		 * can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_request);

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_SD_HS ||
		 timing == MMC_TIMING_MMC_HS ||
		 timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
	    (ios->power_mode == MMC_POWER_UP) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}
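
	/*
	 * For example (following the block above): with a 50 MHz actual
	 * clock, timeout_clk = 50000 (kHz), so a default maximum count of
	 * 1 << 27 clocks gives max_busy_timeout = (1 << 27) / 50000 ~= 2684 ms.
	 */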

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		if (ios->timing == MMC_TIMING_SD_HS ||
		    ios->timing == MMC_TIMING_MMC_HS ||
		    ios->timing == MMC_TIMING_MMC_HS400 ||
		    ios->timing == MMC_TIMING_MMC_HS200 ||
		    ios->timing == MMC_TIMING_MMC_DDR52 ||
		    ios->timing == MMC_TIMING_UHS_SDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR104 ||
		    ios->timing == MMC_TIMING_UHS_DDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
		    ((ios->timing == MMC_TIMING_UHS_SDR12) ||
		     (ios->timing == MMC_TIMING_UHS_SDR25) ||
		     (ios->timing == MMC_TIMING_UHS_SDR50) ||
		     (ios->timing == MMC_TIMING_UHS_SDR104) ||
		     (ios->timing == MMC_TIMING_UHS_DDR50) ||
		     (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot gpio detect; if defined, it takes precedence
	 * over the built-in controller functionality.
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else if (mmc_can_gpio_ro(host->mmc))
		is_readonly = mmc_gpio_get_ro(host->mmc);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}
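
/*
 * For hosts with SDHCI_QUIRK_UNSTABLE_RO_DETECT, sdhci_get_ro() below
 * samples the write-protect state SAMPLE_COUNT times, 30 ms apart, and
 * reports read-only only if a majority (more than SAMPLE_COUNT / 2) of
 * the samples say so.
 */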
2104 !is_readonly : is_readonly; 2105 } 2106 2107 #define SAMPLE_COUNT 5 2108 2109 static int sdhci_get_ro(struct mmc_host *mmc) 2110 { 2111 struct sdhci_host *host = mmc_priv(mmc); 2112 int i, ro_count; 2113 2114 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) 2115 return sdhci_check_ro(host); 2116 2117 ro_count = 0; 2118 for (i = 0; i < SAMPLE_COUNT; i++) { 2119 if (sdhci_check_ro(host)) { 2120 if (++ro_count > SAMPLE_COUNT / 2) 2121 return 1; 2122 } 2123 msleep(30); 2124 } 2125 return 0; 2126 } 2127 2128 static void sdhci_hw_reset(struct mmc_host *mmc) 2129 { 2130 struct sdhci_host *host = mmc_priv(mmc); 2131 2132 if (host->ops && host->ops->hw_reset) 2133 host->ops->hw_reset(host); 2134 } 2135 2136 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 2137 { 2138 if (!(host->flags & SDHCI_DEVICE_DEAD)) { 2139 if (enable) 2140 host->ier |= SDHCI_INT_CARD_INT; 2141 else 2142 host->ier &= ~SDHCI_INT_CARD_INT; 2143 2144 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2145 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2146 } 2147 } 2148 2149 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 2150 { 2151 struct sdhci_host *host = mmc_priv(mmc); 2152 unsigned long flags; 2153 2154 if (enable) 2155 pm_runtime_get_noresume(host->mmc->parent); 2156 2157 spin_lock_irqsave(&host->lock, flags); 2158 sdhci_enable_sdio_irq_nolock(host, enable); 2159 spin_unlock_irqrestore(&host->lock, flags); 2160 2161 if (!enable) 2162 pm_runtime_put_noidle(host->mmc->parent); 2163 } 2164 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2165 2166 static void sdhci_ack_sdio_irq(struct mmc_host *mmc) 2167 { 2168 struct sdhci_host *host = mmc_priv(mmc); 2169 unsigned long flags; 2170 2171 spin_lock_irqsave(&host->lock, flags); 2172 sdhci_enable_sdio_irq_nolock(host, true); 2173 spin_unlock_irqrestore(&host->lock, flags); 2174 } 2175 2176 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 2177 struct mmc_ios *ios) 2178 { 2179 struct sdhci_host *host = mmc_priv(mmc); 2180 u16 ctrl; 2181 int ret; 2182 2183 /* 2184 * Signal Voltage Switching is only applicable for Host Controllers 2185 * v3.00 and above. 
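* Older hosts have no 1.8V Signal Enable bit in Host Control 2,
* so for them this handler simply reports success.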
2186 */
2187 if (host->version < SDHCI_SPEC_300)
2188 return 0;
2189
2190 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2191
2192 switch (ios->signal_voltage) {
2193 case MMC_SIGNAL_VOLTAGE_330:
2194 if (!(host->flags & SDHCI_SIGNALING_330))
2195 return -EINVAL;
2196 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2197 ctrl &= ~SDHCI_CTRL_VDD_180;
2198 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2199
2200 if (!IS_ERR(mmc->supply.vqmmc)) {
2201 ret = mmc_regulator_set_vqmmc(mmc, ios);
2202 if (ret) {
2203 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2204 mmc_hostname(mmc));
2205 return -EIO;
2206 }
2207 }
2208 /* Wait for 5ms */
2209 usleep_range(5000, 5500);
2210
2211 /* 3.3V regulator output should be stable within 5 ms */
2212 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2213 if (!(ctrl & SDHCI_CTRL_VDD_180))
2214 return 0;
2215
2216 pr_warn("%s: 3.3V regulator output did not become stable\n",
2217 mmc_hostname(mmc));
2218
2219 return -EAGAIN;
2220 case MMC_SIGNAL_VOLTAGE_180:
2221 if (!(host->flags & SDHCI_SIGNALING_180))
2222 return -EINVAL;
2223 if (!IS_ERR(mmc->supply.vqmmc)) {
2224 ret = mmc_regulator_set_vqmmc(mmc, ios);
2225 if (ret) {
2226 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2227 mmc_hostname(mmc));
2228 return -EIO;
2229 }
2230 }
2231
2232 /*
2233 * Enable 1.8V Signal Enable in the Host Control2
2234 * register
2235 */
2236 ctrl |= SDHCI_CTRL_VDD_180;
2237 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2238
2239 /* Some controllers need to do more when switching */
2240 if (host->ops->voltage_switch)
2241 host->ops->voltage_switch(host);
2242
2243 /* 1.8V regulator output should be stable within 5 ms */
2244 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2245 if (ctrl & SDHCI_CTRL_VDD_180)
2246 return 0;
2247
2248 pr_warn("%s: 1.8V regulator output did not become stable\n",
2249 mmc_hostname(mmc));
2250
2251 return -EAGAIN;
2252 case MMC_SIGNAL_VOLTAGE_120:
2253 if (!(host->flags & SDHCI_SIGNALING_120))
2254 return -EINVAL;
2255 if (!IS_ERR(mmc->supply.vqmmc)) {
2256 ret = mmc_regulator_set_vqmmc(mmc, ios);
2257 if (ret) {
2258 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2259 mmc_hostname(mmc));
2260 return -EIO;
2261 }
2262 }
2263 return 0;
2264 default:
2265 /* No signal voltage switch required */
2266 return 0;
2267 }
2268 }
2269 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2270
2271 static int sdhci_card_busy(struct mmc_host *mmc)
2272 {
2273 struct sdhci_host *host = mmc_priv(mmc);
2274 u32 present_state;
2275
2276 /* Check whether DAT[0] is 0 */
2277 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2278
2279 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2280 }
2281
2282 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2283 {
2284 struct sdhci_host *host = mmc_priv(mmc);
2285 unsigned long flags;
2286
2287 spin_lock_irqsave(&host->lock, flags);
2288 host->flags |= SDHCI_HS400_TUNING;
2289 spin_unlock_irqrestore(&host->lock, flags);
2290
2291 return 0;
2292 }
2293
2294 void sdhci_start_tuning(struct sdhci_host *host)
2295 {
2296 u16 ctrl;
2297
2298 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2299 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2300 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2301 ctrl |= SDHCI_CTRL_TUNED_CLK;
2302 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2303
2304 /*
2305 * As per the Host Controller spec v3.00, tuning command
2306 * generates Buffer Read Ready interrupt, so enable that.
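* (Buffer Read Ready is the SDHCI_INT_DATA_AVAIL bit programmed below.)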
2307 * 2308 * Note: The spec clearly says that when tuning sequence 2309 * is being performed, the controller does not generate 2310 * interrupts other than Buffer Read Ready interrupt. But 2311 * to make sure we don't hit a controller bug, we _only_ 2312 * enable Buffer Read Ready interrupt here. 2313 */ 2314 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 2315 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 2316 } 2317 EXPORT_SYMBOL_GPL(sdhci_start_tuning); 2318 2319 void sdhci_end_tuning(struct sdhci_host *host) 2320 { 2321 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2322 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2323 } 2324 EXPORT_SYMBOL_GPL(sdhci_end_tuning); 2325 2326 void sdhci_reset_tuning(struct sdhci_host *host) 2327 { 2328 u16 ctrl; 2329 2330 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2331 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2332 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2333 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2334 } 2335 EXPORT_SYMBOL_GPL(sdhci_reset_tuning); 2336 2337 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) 2338 { 2339 sdhci_reset_tuning(host); 2340 2341 sdhci_do_reset(host, SDHCI_RESET_CMD); 2342 sdhci_do_reset(host, SDHCI_RESET_DATA); 2343 2344 sdhci_end_tuning(host); 2345 2346 mmc_abort_tuning(host->mmc, opcode); 2347 } 2348 EXPORT_SYMBOL_GPL(sdhci_abort_tuning); 2349 2350 /* 2351 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI 2352 * tuning command does not have a data payload (or rather the hardware does it 2353 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command 2354 * interrupt setup is different to other commands and there is no timeout 2355 * interrupt so special handling is needed. 2356 */ 2357 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) 2358 { 2359 struct mmc_host *mmc = host->mmc; 2360 struct mmc_command cmd = {}; 2361 struct mmc_request mrq = {}; 2362 unsigned long flags; 2363 u32 b = host->sdma_boundary; 2364 2365 spin_lock_irqsave(&host->lock, flags); 2366 2367 cmd.opcode = opcode; 2368 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2369 cmd.mrq = &mrq; 2370 2371 mrq.cmd = &cmd; 2372 /* 2373 * In response to CMD19, the card sends 64 bytes of tuning 2374 * block to the Host Controller. So we set the block size 2375 * to 64 here. 2376 */ 2377 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && 2378 mmc->ios.bus_width == MMC_BUS_WIDTH_8) 2379 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); 2380 else 2381 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); 2382 2383 /* 2384 * The tuning block is sent by the card to the host controller. 2385 * So we set the TRNS_READ bit in the Transfer Mode register. 2386 * This also takes care of setting DMA Enable and Multi Block 2387 * Select in the same register to 0. 2388 */ 2389 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 2390 2391 sdhci_send_command(host, &cmd); 2392 2393 host->cmd = NULL; 2394 2395 sdhci_del_timer(host, &mrq); 2396 2397 host->tuning_done = 0; 2398 2399 spin_unlock_irqrestore(&host->lock, flags); 2400 2401 /* Wait for Buffer Read Ready interrupt */ 2402 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2403 msecs_to_jiffies(50)); 2404 2405 } 2406 EXPORT_SYMBOL_GPL(sdhci_send_tuning); 2407 2408 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2409 { 2410 int i; 2411 2412 /* 2413 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number 2414 * of loops reaches tuning loop count. 
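* (The loop count comes from host->tuning_loop_count, which
* sdhci_alloc_host() initializes to MAX_TUNING_LOOP.)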
2415 */ 2416 for (i = 0; i < host->tuning_loop_count; i++) { 2417 u16 ctrl; 2418 2419 sdhci_send_tuning(host, opcode); 2420 2421 if (!host->tuning_done) { 2422 pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n", 2423 mmc_hostname(host->mmc)); 2424 sdhci_abort_tuning(host, opcode); 2425 return -ETIMEDOUT; 2426 } 2427 2428 /* Spec does not require a delay between tuning cycles */ 2429 if (host->tuning_delay > 0) 2430 mdelay(host->tuning_delay); 2431 2432 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2433 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { 2434 if (ctrl & SDHCI_CTRL_TUNED_CLK) 2435 return 0; /* Success! */ 2436 break; 2437 } 2438 2439 } 2440 2441 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2442 mmc_hostname(host->mmc)); 2443 sdhci_reset_tuning(host); 2444 return -EAGAIN; 2445 } 2446 2447 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2448 { 2449 struct sdhci_host *host = mmc_priv(mmc); 2450 int err = 0; 2451 unsigned int tuning_count = 0; 2452 bool hs400_tuning; 2453 2454 hs400_tuning = host->flags & SDHCI_HS400_TUNING; 2455 2456 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 2457 tuning_count = host->tuning_count; 2458 2459 /* 2460 * The Host Controller needs tuning in case of SDR104 and DDR50 2461 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in 2462 * the Capabilities register. 2463 * If the Host Controller supports the HS200 mode then the 2464 * tuning function has to be executed. 2465 */ 2466 switch (host->timing) { 2467 /* HS400 tuning is done in HS200 mode */ 2468 case MMC_TIMING_MMC_HS400: 2469 err = -EINVAL; 2470 goto out; 2471 2472 case MMC_TIMING_MMC_HS200: 2473 /* 2474 * Periodic re-tuning for HS400 is not expected to be needed, so 2475 * disable it here. 2476 */ 2477 if (hs400_tuning) 2478 tuning_count = 0; 2479 break; 2480 2481 case MMC_TIMING_UHS_SDR104: 2482 case MMC_TIMING_UHS_DDR50: 2483 break; 2484 2485 case MMC_TIMING_UHS_SDR50: 2486 if (host->flags & SDHCI_SDR50_NEEDS_TUNING) 2487 break; 2488 /* FALLTHROUGH */ 2489 2490 default: 2491 goto out; 2492 } 2493 2494 if (host->ops->platform_execute_tuning) { 2495 err = host->ops->platform_execute_tuning(host, opcode); 2496 goto out; 2497 } 2498 2499 host->mmc->retune_period = tuning_count; 2500 2501 if (host->tuning_delay < 0) 2502 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; 2503 2504 sdhci_start_tuning(host); 2505 2506 host->tuning_err = __sdhci_execute_tuning(host, opcode); 2507 2508 sdhci_end_tuning(host); 2509 out: 2510 host->flags &= ~SDHCI_HS400_TUNING; 2511 2512 return err; 2513 } 2514 EXPORT_SYMBOL_GPL(sdhci_execute_tuning); 2515 2516 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2517 { 2518 /* Host Controller v3.00 defines preset value registers */ 2519 if (host->version < SDHCI_SPEC_300) 2520 return; 2521 2522 /* 2523 * We only enable or disable Preset Value if they are not already 2524 * enabled or disabled respectively. Otherwise, we bail out. 
2525 */ 2526 if (host->preset_enabled != enable) { 2527 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2528 2529 if (enable) 2530 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2531 else 2532 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2533 2534 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2535 2536 if (enable) 2537 host->flags |= SDHCI_PV_ENABLED; 2538 else 2539 host->flags &= ~SDHCI_PV_ENABLED; 2540 2541 host->preset_enabled = enable; 2542 } 2543 } 2544 2545 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 2546 int err) 2547 { 2548 struct sdhci_host *host = mmc_priv(mmc); 2549 struct mmc_data *data = mrq->data; 2550 2551 if (data->host_cookie != COOKIE_UNMAPPED) 2552 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 2553 mmc_get_dma_dir(data)); 2554 2555 data->host_cookie = COOKIE_UNMAPPED; 2556 } 2557 2558 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 2559 { 2560 struct sdhci_host *host = mmc_priv(mmc); 2561 2562 mrq->data->host_cookie = COOKIE_UNMAPPED; 2563 2564 /* 2565 * No pre-mapping in the pre hook if we're using the bounce buffer, 2566 * for that we would need two bounce buffers since one buffer is 2567 * in flight when this is getting called. 2568 */ 2569 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) 2570 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 2571 } 2572 2573 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) 2574 { 2575 if (host->data_cmd) { 2576 host->data_cmd->error = err; 2577 sdhci_finish_mrq(host, host->data_cmd->mrq); 2578 } 2579 2580 if (host->cmd) { 2581 host->cmd->error = err; 2582 sdhci_finish_mrq(host, host->cmd->mrq); 2583 } 2584 } 2585 2586 static void sdhci_card_event(struct mmc_host *mmc) 2587 { 2588 struct sdhci_host *host = mmc_priv(mmc); 2589 unsigned long flags; 2590 int present; 2591 2592 /* First check if client has provided their own card event */ 2593 if (host->ops->card_event) 2594 host->ops->card_event(host); 2595 2596 present = mmc->ops->get_cd(mmc); 2597 2598 spin_lock_irqsave(&host->lock, flags); 2599 2600 /* Check sdhci_has_requests() first in case we are runtime suspended */ 2601 if (sdhci_has_requests(host) && !present) { 2602 pr_err("%s: Card removed during transfer!\n", 2603 mmc_hostname(host->mmc)); 2604 pr_err("%s: Resetting controller.\n", 2605 mmc_hostname(host->mmc)); 2606 2607 sdhci_do_reset(host, SDHCI_RESET_CMD); 2608 sdhci_do_reset(host, SDHCI_RESET_DATA); 2609 2610 sdhci_error_out_mrqs(host, -ENOMEDIUM); 2611 } 2612 2613 spin_unlock_irqrestore(&host->lock, flags); 2614 } 2615 2616 static const struct mmc_host_ops sdhci_ops = { 2617 .request = sdhci_request, 2618 .post_req = sdhci_post_req, 2619 .pre_req = sdhci_pre_req, 2620 .set_ios = sdhci_set_ios, 2621 .get_cd = sdhci_get_cd, 2622 .get_ro = sdhci_get_ro, 2623 .hw_reset = sdhci_hw_reset, 2624 .enable_sdio_irq = sdhci_enable_sdio_irq, 2625 .ack_sdio_irq = sdhci_ack_sdio_irq, 2626 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 2627 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 2628 .execute_tuning = sdhci_execute_tuning, 2629 .card_event = sdhci_card_event, 2630 .card_busy = sdhci_card_busy, 2631 }; 2632 2633 /*****************************************************************************\ 2634 * * 2635 * Request done * 2636 * * 2637 \*****************************************************************************/ 2638 2639 static bool sdhci_request_done(struct sdhci_host *host) 2640 { 2641 unsigned long flags; 2642 struct mmc_request *mrq; 2643 int i; 2644 2645 
spin_lock_irqsave(&host->lock, flags); 2646 2647 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 2648 mrq = host->mrqs_done[i]; 2649 if (mrq) 2650 break; 2651 } 2652 2653 if (!mrq) { 2654 spin_unlock_irqrestore(&host->lock, flags); 2655 return true; 2656 } 2657 2658 /* 2659 * Always unmap the data buffers if they were mapped by 2660 * sdhci_prepare_data() whenever we finish with a request. 2661 * This avoids leaking DMA mappings on error. 2662 */ 2663 if (host->flags & SDHCI_REQ_USE_DMA) { 2664 struct mmc_data *data = mrq->data; 2665 2666 if (data && data->host_cookie == COOKIE_MAPPED) { 2667 if (host->bounce_buffer) { 2668 /* 2669 * On reads, copy the bounced data into the 2670 * sglist 2671 */ 2672 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 2673 unsigned int length = data->bytes_xfered; 2674 2675 if (length > host->bounce_buffer_size) { 2676 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 2677 mmc_hostname(host->mmc), 2678 host->bounce_buffer_size, 2679 data->bytes_xfered); 2680 /* Cap it down and continue */ 2681 length = host->bounce_buffer_size; 2682 } 2683 dma_sync_single_for_cpu( 2684 host->mmc->parent, 2685 host->bounce_addr, 2686 host->bounce_buffer_size, 2687 DMA_FROM_DEVICE); 2688 sg_copy_from_buffer(data->sg, 2689 data->sg_len, 2690 host->bounce_buffer, 2691 length); 2692 } else { 2693 /* No copying, just switch ownership */ 2694 dma_sync_single_for_cpu( 2695 host->mmc->parent, 2696 host->bounce_addr, 2697 host->bounce_buffer_size, 2698 mmc_get_dma_dir(data)); 2699 } 2700 } else { 2701 /* Unmap the raw data */ 2702 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 2703 data->sg_len, 2704 mmc_get_dma_dir(data)); 2705 } 2706 data->host_cookie = COOKIE_UNMAPPED; 2707 } 2708 } 2709 2710 /* 2711 * The controller needs a reset of internal state machines 2712 * upon error conditions. 2713 */ 2714 if (sdhci_needs_reset(host, mrq)) { 2715 /* 2716 * Do not finish until command and data lines are available for 2717 * reset. Note there can only be one other mrq, so it cannot 2718 * also be in mrqs_done, otherwise host->cmd and host->data_cmd 2719 * would both be null. 2720 */ 2721 if (host->cmd || host->data_cmd) { 2722 spin_unlock_irqrestore(&host->lock, flags); 2723 return true; 2724 } 2725 2726 /* Some controllers need this kick or reset won't work here */ 2727 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 2728 /* This is to force an update */ 2729 host->ops->set_clock(host, host->clock); 2730 2731 /* Spec says we should do both at the same time, but Ricoh 2732 controllers do not like that. 
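So issue two separate resets, CMD first and then DATA.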
*/ 2733 sdhci_do_reset(host, SDHCI_RESET_CMD); 2734 sdhci_do_reset(host, SDHCI_RESET_DATA); 2735 2736 host->pending_reset = false; 2737 } 2738 2739 host->mrqs_done[i] = NULL; 2740 2741 spin_unlock_irqrestore(&host->lock, flags); 2742 2743 mmc_request_done(host->mmc, mrq); 2744 2745 return false; 2746 } 2747 2748 static void sdhci_complete_work(struct work_struct *work) 2749 { 2750 struct sdhci_host *host = container_of(work, struct sdhci_host, 2751 complete_work); 2752 2753 while (!sdhci_request_done(host)) 2754 ; 2755 } 2756 2757 static void sdhci_timeout_timer(struct timer_list *t) 2758 { 2759 struct sdhci_host *host; 2760 unsigned long flags; 2761 2762 host = from_timer(host, t, timer); 2763 2764 spin_lock_irqsave(&host->lock, flags); 2765 2766 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { 2767 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", 2768 mmc_hostname(host->mmc)); 2769 sdhci_dumpregs(host); 2770 2771 host->cmd->error = -ETIMEDOUT; 2772 sdhci_finish_mrq(host, host->cmd->mrq); 2773 } 2774 2775 spin_unlock_irqrestore(&host->lock, flags); 2776 } 2777 2778 static void sdhci_timeout_data_timer(struct timer_list *t) 2779 { 2780 struct sdhci_host *host; 2781 unsigned long flags; 2782 2783 host = from_timer(host, t, data_timer); 2784 2785 spin_lock_irqsave(&host->lock, flags); 2786 2787 if (host->data || host->data_cmd || 2788 (host->cmd && sdhci_data_line_cmd(host->cmd))) { 2789 pr_err("%s: Timeout waiting for hardware interrupt.\n", 2790 mmc_hostname(host->mmc)); 2791 sdhci_dumpregs(host); 2792 2793 if (host->data) { 2794 host->data->error = -ETIMEDOUT; 2795 sdhci_finish_data(host); 2796 queue_work(host->complete_wq, &host->complete_work); 2797 } else if (host->data_cmd) { 2798 host->data_cmd->error = -ETIMEDOUT; 2799 sdhci_finish_mrq(host, host->data_cmd->mrq); 2800 } else { 2801 host->cmd->error = -ETIMEDOUT; 2802 sdhci_finish_mrq(host, host->cmd->mrq); 2803 } 2804 } 2805 2806 spin_unlock_irqrestore(&host->lock, flags); 2807 } 2808 2809 /*****************************************************************************\ 2810 * * 2811 * Interrupt handling * 2812 * * 2813 \*****************************************************************************/ 2814 2815 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) 2816 { 2817 /* Handle auto-CMD12 error */ 2818 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { 2819 struct mmc_request *mrq = host->data_cmd->mrq; 2820 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 2821 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 2822 SDHCI_INT_DATA_TIMEOUT : 2823 SDHCI_INT_DATA_CRC; 2824 2825 /* Treat auto-CMD12 error the same as data error */ 2826 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { 2827 *intmask_p |= data_err_bit; 2828 return; 2829 } 2830 } 2831 2832 if (!host->cmd) { 2833 /* 2834 * SDHCI recovers from errors by resetting the cmd and data 2835 * circuits. Until that is done, there very well might be more 2836 * interrupts, so ignore them in that case. 
2837 */ 2838 if (host->pending_reset) 2839 return; 2840 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 2841 mmc_hostname(host->mmc), (unsigned)intmask); 2842 sdhci_dumpregs(host); 2843 return; 2844 } 2845 2846 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 2847 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 2848 if (intmask & SDHCI_INT_TIMEOUT) 2849 host->cmd->error = -ETIMEDOUT; 2850 else 2851 host->cmd->error = -EILSEQ; 2852 2853 /* Treat data command CRC error the same as data CRC error */ 2854 if (host->cmd->data && 2855 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 2856 SDHCI_INT_CRC) { 2857 host->cmd = NULL; 2858 *intmask_p |= SDHCI_INT_DATA_CRC; 2859 return; 2860 } 2861 2862 __sdhci_finish_mrq(host, host->cmd->mrq); 2863 return; 2864 } 2865 2866 /* Handle auto-CMD23 error */ 2867 if (intmask & SDHCI_INT_AUTO_CMD_ERR) { 2868 struct mmc_request *mrq = host->cmd->mrq; 2869 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 2870 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 2871 -ETIMEDOUT : 2872 -EILSEQ; 2873 2874 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { 2875 mrq->sbc->error = err; 2876 __sdhci_finish_mrq(host, mrq); 2877 return; 2878 } 2879 } 2880 2881 if (intmask & SDHCI_INT_RESPONSE) 2882 sdhci_finish_command(host); 2883 } 2884 2885 static void sdhci_adma_show_error(struct sdhci_host *host) 2886 { 2887 void *desc = host->adma_table; 2888 dma_addr_t dma = host->adma_addr; 2889 2890 sdhci_dumpregs(host); 2891 2892 while (true) { 2893 struct sdhci_adma2_64_desc *dma_desc = desc; 2894 2895 if (host->flags & SDHCI_USE_64_BIT_DMA) 2896 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 2897 (unsigned long long)dma, 2898 le32_to_cpu(dma_desc->addr_hi), 2899 le32_to_cpu(dma_desc->addr_lo), 2900 le16_to_cpu(dma_desc->len), 2901 le16_to_cpu(dma_desc->cmd)); 2902 else 2903 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 2904 (unsigned long long)dma, 2905 le32_to_cpu(dma_desc->addr_lo), 2906 le16_to_cpu(dma_desc->len), 2907 le16_to_cpu(dma_desc->cmd)); 2908 2909 desc += host->desc_sz; 2910 dma += host->desc_sz; 2911 2912 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 2913 break; 2914 } 2915 } 2916 2917 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 2918 { 2919 u32 command; 2920 2921 /* CMD19 generates _only_ Buffer Read Ready interrupt */ 2922 if (intmask & SDHCI_INT_DATA_AVAIL) { 2923 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); 2924 if (command == MMC_SEND_TUNING_BLOCK || 2925 command == MMC_SEND_TUNING_BLOCK_HS200) { 2926 host->tuning_done = 1; 2927 wake_up(&host->buf_ready_int); 2928 return; 2929 } 2930 } 2931 2932 if (!host->data) { 2933 struct mmc_command *data_cmd = host->data_cmd; 2934 2935 /* 2936 * The "data complete" interrupt is also used to 2937 * indicate that a busy state has ended. See comment 2938 * above in sdhci_cmd_irq(). 2939 */ 2940 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 2941 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 2942 host->data_cmd = NULL; 2943 data_cmd->error = -ETIMEDOUT; 2944 __sdhci_finish_mrq(host, data_cmd->mrq); 2945 return; 2946 } 2947 if (intmask & SDHCI_INT_DATA_END) { 2948 host->data_cmd = NULL; 2949 /* 2950 * Some cards handle busy-end interrupt 2951 * before the command completed, so make 2952 * sure we do things in the proper order. 
2953 */ 2954 if (host->cmd == data_cmd) 2955 return; 2956 2957 __sdhci_finish_mrq(host, data_cmd->mrq); 2958 return; 2959 } 2960 } 2961 2962 /* 2963 * SDHCI recovers from errors by resetting the cmd and data 2964 * circuits. Until that is done, there very well might be more 2965 * interrupts, so ignore them in that case. 2966 */ 2967 if (host->pending_reset) 2968 return; 2969 2970 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 2971 mmc_hostname(host->mmc), (unsigned)intmask); 2972 sdhci_dumpregs(host); 2973 2974 return; 2975 } 2976 2977 if (intmask & SDHCI_INT_DATA_TIMEOUT) 2978 host->data->error = -ETIMEDOUT; 2979 else if (intmask & SDHCI_INT_DATA_END_BIT) 2980 host->data->error = -EILSEQ; 2981 else if ((intmask & SDHCI_INT_DATA_CRC) && 2982 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 2983 != MMC_BUS_TEST_R) 2984 host->data->error = -EILSEQ; 2985 else if (intmask & SDHCI_INT_ADMA_ERROR) { 2986 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), 2987 intmask); 2988 sdhci_adma_show_error(host); 2989 host->data->error = -EIO; 2990 if (host->ops->adma_workaround) 2991 host->ops->adma_workaround(host, intmask); 2992 } 2993 2994 if (host->data->error) 2995 sdhci_finish_data(host); 2996 else { 2997 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 2998 sdhci_transfer_pio(host); 2999 3000 /* 3001 * We currently don't do anything fancy with DMA 3002 * boundaries, but as we can't disable the feature 3003 * we need to at least restart the transfer. 3004 * 3005 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 3006 * should return a valid address to continue from, but as 3007 * some controllers are faulty, don't trust them. 3008 */ 3009 if (intmask & SDHCI_INT_DMA_END) { 3010 dma_addr_t dmastart, dmanow; 3011 3012 dmastart = sdhci_sdma_address(host); 3013 dmanow = dmastart + host->data->bytes_xfered; 3014 /* 3015 * Force update to the next DMA block boundary. 3016 */ 3017 dmanow = (dmanow & 3018 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 3019 SDHCI_DEFAULT_BOUNDARY_SIZE; 3020 host->data->bytes_xfered = dmanow - dmastart; 3021 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", 3022 &dmastart, host->data->bytes_xfered, &dmanow); 3023 sdhci_set_sdma_addr(host, dmanow); 3024 } 3025 3026 if (intmask & SDHCI_INT_DATA_END) { 3027 if (host->cmd == host->data_cmd) { 3028 /* 3029 * Data managed to finish before the 3030 * command completed. Make sure we do 3031 * things in the proper order. 
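* (host->data_early is looked at again once the command itself
* completes, and the data is finished from there.)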
3032 */
3033 host->data_early = 1;
3034 } else {
3035 sdhci_finish_data(host);
3036 }
3037 }
3038 }
3039 }
3040
3041 static inline bool sdhci_defer_done(struct sdhci_host *host,
3042 struct mmc_request *mrq)
3043 {
3044 struct mmc_data *data = mrq->data;
3045
3046 return host->pending_reset ||
3047 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3048 data->host_cookie == COOKIE_MAPPED);
3049 }
3050
3051 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3052 {
3053 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3054 irqreturn_t result = IRQ_NONE;
3055 struct sdhci_host *host = dev_id;
3056 u32 intmask, mask, unexpected = 0;
3057 int max_loops = 16;
3058 int i;
3059
3060 spin_lock(&host->lock);
3061
3062 if (host->runtime_suspended) {
3063 spin_unlock(&host->lock);
3064 return IRQ_NONE;
3065 }
3066
3067 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3068 if (!intmask || intmask == 0xffffffff) {
3069 result = IRQ_NONE;
3070 goto out;
3071 }
3072
3073 do {
3074 DBG("IRQ status 0x%08x\n", intmask);
3075
3076 if (host->ops->irq) {
3077 intmask = host->ops->irq(host, intmask);
3078 if (!intmask)
3079 goto cont;
3080 }
3081
3082 /* Clear selected interrupts. */
3083 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3084 SDHCI_INT_BUS_POWER);
3085 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3086
3087 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3088 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3089 SDHCI_CARD_PRESENT;
3090
3091 /*
3092 * There is an observation on i.MX eSDHC: the INSERT
3093 * bit will immediately be set again when it gets
3094 * cleared, if a card is inserted. We have to mask
3095 * the irq to prevent an interrupt storm which would
3096 * freeze the system. The REMOVE bit behaves the
3097 * same way.
3098 *
3099 * More testing is needed here to ensure it works
3100 * for other platforms though.
3101 */
3102 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3103 SDHCI_INT_CARD_REMOVE);
3104 host->ier |= present ?
SDHCI_INT_CARD_REMOVE : 3105 SDHCI_INT_CARD_INSERT; 3106 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3107 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3108 3109 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 3110 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 3111 3112 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 3113 SDHCI_INT_CARD_REMOVE); 3114 result = IRQ_WAKE_THREAD; 3115 } 3116 3117 if (intmask & SDHCI_INT_CMD_MASK) 3118 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask); 3119 3120 if (intmask & SDHCI_INT_DATA_MASK) 3121 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 3122 3123 if (intmask & SDHCI_INT_BUS_POWER) 3124 pr_err("%s: Card is consuming too much power!\n", 3125 mmc_hostname(host->mmc)); 3126 3127 if (intmask & SDHCI_INT_RETUNE) 3128 mmc_retune_needed(host->mmc); 3129 3130 if ((intmask & SDHCI_INT_CARD_INT) && 3131 (host->ier & SDHCI_INT_CARD_INT)) { 3132 sdhci_enable_sdio_irq_nolock(host, false); 3133 sdio_signal_irq(host->mmc); 3134 } 3135 3136 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 3137 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3138 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 3139 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); 3140 3141 if (intmask) { 3142 unexpected |= intmask; 3143 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3144 } 3145 cont: 3146 if (result == IRQ_NONE) 3147 result = IRQ_HANDLED; 3148 3149 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3150 } while (intmask && --max_loops); 3151 3152 /* Determine if mrqs can be completed immediately */ 3153 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3154 struct mmc_request *mrq = host->mrqs_done[i]; 3155 3156 if (!mrq) 3157 continue; 3158 3159 if (sdhci_defer_done(host, mrq)) { 3160 result = IRQ_WAKE_THREAD; 3161 } else { 3162 mrqs_done[i] = mrq; 3163 host->mrqs_done[i] = NULL; 3164 } 3165 } 3166 out: 3167 spin_unlock(&host->lock); 3168 3169 /* Process mrqs ready for immediate completion */ 3170 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3171 if (mrqs_done[i]) 3172 mmc_request_done(host->mmc, mrqs_done[i]); 3173 } 3174 3175 if (unexpected) { 3176 pr_err("%s: Unexpected interrupt 0x%08x.\n", 3177 mmc_hostname(host->mmc), unexpected); 3178 sdhci_dumpregs(host); 3179 } 3180 3181 return result; 3182 } 3183 3184 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3185 { 3186 struct sdhci_host *host = dev_id; 3187 unsigned long flags; 3188 u32 isr; 3189 3190 while (!sdhci_request_done(host)) 3191 ; 3192 3193 spin_lock_irqsave(&host->lock, flags); 3194 isr = host->thread_isr; 3195 host->thread_isr = 0; 3196 spin_unlock_irqrestore(&host->lock, flags); 3197 3198 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3199 struct mmc_host *mmc = host->mmc; 3200 3201 mmc->ops->card_event(mmc); 3202 mmc_detect_change(mmc, msecs_to_jiffies(200)); 3203 } 3204 3205 return IRQ_HANDLED; 3206 } 3207 3208 /*****************************************************************************\ 3209 * * 3210 * Suspend/resume * 3211 * * 3212 \*****************************************************************************/ 3213 3214 #ifdef CONFIG_PM 3215 3216 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host) 3217 { 3218 return mmc_card_is_removable(host->mmc) && 3219 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3220 !mmc_can_gpio_cd(host->mmc); 3221 } 3222 3223 /* 3224 * To enable wakeup events, the corresponding events have to be enabled in 3225 * the Interrupt Status Enable register too. 
See 'Table 1-6: Wakeup Signal 3226 * Table' in the SD Host Controller Standard Specification. 3227 * It is useless to restore SDHCI_INT_ENABLE state in 3228 * sdhci_disable_irq_wakeups() since it will be set by 3229 * sdhci_enable_card_detection() or sdhci_init(). 3230 */ 3231 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) 3232 { 3233 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | 3234 SDHCI_WAKE_ON_INT; 3235 u32 irq_val = 0; 3236 u8 wake_val = 0; 3237 u8 val; 3238 3239 if (sdhci_cd_irq_can_wakeup(host)) { 3240 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; 3241 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; 3242 } 3243 3244 if (mmc_card_wake_sdio_irq(host->mmc)) { 3245 wake_val |= SDHCI_WAKE_ON_INT; 3246 irq_val |= SDHCI_INT_CARD_INT; 3247 } 3248 3249 if (!irq_val) 3250 return false; 3251 3252 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3253 val &= ~mask; 3254 val |= wake_val; 3255 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3256 3257 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); 3258 3259 host->irq_wake_enabled = !enable_irq_wake(host->irq); 3260 3261 return host->irq_wake_enabled; 3262 } 3263 3264 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 3265 { 3266 u8 val; 3267 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 3268 | SDHCI_WAKE_ON_INT; 3269 3270 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3271 val &= ~mask; 3272 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3273 3274 disable_irq_wake(host->irq); 3275 3276 host->irq_wake_enabled = false; 3277 } 3278 3279 int sdhci_suspend_host(struct sdhci_host *host) 3280 { 3281 sdhci_disable_card_detection(host); 3282 3283 mmc_retune_timer_stop(host->mmc); 3284 3285 if (!device_may_wakeup(mmc_dev(host->mmc)) || 3286 !sdhci_enable_irq_wakeups(host)) { 3287 host->ier = 0; 3288 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3289 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3290 free_irq(host->irq, host); 3291 } 3292 3293 return 0; 3294 } 3295 3296 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 3297 3298 int sdhci_resume_host(struct sdhci_host *host) 3299 { 3300 struct mmc_host *mmc = host->mmc; 3301 int ret = 0; 3302 3303 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3304 if (host->ops->enable_dma) 3305 host->ops->enable_dma(host); 3306 } 3307 3308 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && 3309 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 3310 /* Card keeps power but host controller does not */ 3311 sdhci_init(host, 0); 3312 host->pwr = 0; 3313 host->clock = 0; 3314 mmc->ops->set_ios(mmc, &mmc->ios); 3315 } else { 3316 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); 3317 } 3318 3319 if (host->irq_wake_enabled) { 3320 sdhci_disable_irq_wakeups(host); 3321 } else { 3322 ret = request_threaded_irq(host->irq, sdhci_irq, 3323 sdhci_thread_irq, IRQF_SHARED, 3324 mmc_hostname(host->mmc), host); 3325 if (ret) 3326 return ret; 3327 } 3328 3329 sdhci_enable_card_detection(host); 3330 3331 return ret; 3332 } 3333 3334 EXPORT_SYMBOL_GPL(sdhci_resume_host); 3335 3336 int sdhci_runtime_suspend_host(struct sdhci_host *host) 3337 { 3338 unsigned long flags; 3339 3340 mmc_retune_timer_stop(host->mmc); 3341 3342 spin_lock_irqsave(&host->lock, flags); 3343 host->ier &= SDHCI_INT_CARD_INT; 3344 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3345 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3346 spin_unlock_irqrestore(&host->lock, flags); 3347 3348 synchronize_hardirq(host->irq); 3349 3350 spin_lock_irqsave(&host->lock, flags); 3351 host->runtime_suspended = 
true;
3352 spin_unlock_irqrestore(&host->lock, flags);
3353
3354 return 0;
3355 }
3356 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3357
3358 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3359 {
3360 struct mmc_host *mmc = host->mmc;
3361 unsigned long flags;
3362 int host_flags = host->flags;
3363
3364 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3365 if (host->ops->enable_dma)
3366 host->ops->enable_dma(host);
3367 }
3368
3369 sdhci_init(host, soft_reset);
3370
3371 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3372 mmc->ios.power_mode != MMC_POWER_OFF) {
3373 /* Force clock and power re-program */
3374 host->pwr = 0;
3375 host->clock = 0;
3376 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3377 mmc->ops->set_ios(mmc, &mmc->ios);
3378
3379 if ((host_flags & SDHCI_PV_ENABLED) &&
3380 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3381 spin_lock_irqsave(&host->lock, flags);
3382 sdhci_enable_preset_value(host, true);
3383 spin_unlock_irqrestore(&host->lock, flags);
3384 }
3385
3386 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3387 mmc->ops->hs400_enhanced_strobe)
3388 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3389 }
3390
3391 spin_lock_irqsave(&host->lock, flags);
3392
3393 host->runtime_suspended = false;
3394
3395 /* Enable SDIO IRQ */
3396 if (sdio_irq_claimed(mmc))
3397 sdhci_enable_sdio_irq_nolock(host, true);
3398
3399 /* Enable Card Detection */
3400 sdhci_enable_card_detection(host);
3401
3402 spin_unlock_irqrestore(&host->lock, flags);
3403
3404 return 0;
3405 }
3406 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3407
3408 #endif /* CONFIG_PM */
3409
3410 /*****************************************************************************\
3411 *                                                                           *
3412 * Command Queue Engine (CQE) helpers                                        *
3413 *                                                                           *
3414 \*****************************************************************************/
3415
3416 void sdhci_cqe_enable(struct mmc_host *mmc)
3417 {
3418 struct sdhci_host *host = mmc_priv(mmc);
3419 unsigned long flags;
3420 u8 ctrl;
3421
3422 spin_lock_irqsave(&host->lock, flags);
3423
3424 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3425 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3426 /*
3427 * Hosts from v4.10 onwards support the ADMA3 DMA type.
3428 * ADMA3 uses integrated descriptors, which suit command queuing
3429 * better since the command and transfer descriptors are fetched together.
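* If ADMA3 is not available, fall back to 64-bit or 32-bit ADMA2,
* matching the descriptor width the host was configured for.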
3430 */ 3431 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) 3432 ctrl |= SDHCI_CTRL_ADMA3; 3433 else if (host->flags & SDHCI_USE_64_BIT_DMA) 3434 ctrl |= SDHCI_CTRL_ADMA64; 3435 else 3436 ctrl |= SDHCI_CTRL_ADMA32; 3437 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 3438 3439 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), 3440 SDHCI_BLOCK_SIZE); 3441 3442 /* Set maximum timeout */ 3443 sdhci_set_timeout(host, NULL); 3444 3445 host->ier = host->cqe_ier; 3446 3447 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3448 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3449 3450 host->cqe_on = true; 3451 3452 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", 3453 mmc_hostname(mmc), host->ier, 3454 sdhci_readl(host, SDHCI_INT_STATUS)); 3455 3456 spin_unlock_irqrestore(&host->lock, flags); 3457 } 3458 EXPORT_SYMBOL_GPL(sdhci_cqe_enable); 3459 3460 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) 3461 { 3462 struct sdhci_host *host = mmc_priv(mmc); 3463 unsigned long flags; 3464 3465 spin_lock_irqsave(&host->lock, flags); 3466 3467 sdhci_set_default_irqs(host); 3468 3469 host->cqe_on = false; 3470 3471 if (recovery) { 3472 sdhci_do_reset(host, SDHCI_RESET_CMD); 3473 sdhci_do_reset(host, SDHCI_RESET_DATA); 3474 } 3475 3476 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", 3477 mmc_hostname(mmc), host->ier, 3478 sdhci_readl(host, SDHCI_INT_STATUS)); 3479 3480 spin_unlock_irqrestore(&host->lock, flags); 3481 } 3482 EXPORT_SYMBOL_GPL(sdhci_cqe_disable); 3483 3484 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, 3485 int *data_error) 3486 { 3487 u32 mask; 3488 3489 if (!host->cqe_on) 3490 return false; 3491 3492 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) 3493 *cmd_error = -EILSEQ; 3494 else if (intmask & SDHCI_INT_TIMEOUT) 3495 *cmd_error = -ETIMEDOUT; 3496 else 3497 *cmd_error = 0; 3498 3499 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) 3500 *data_error = -EILSEQ; 3501 else if (intmask & SDHCI_INT_DATA_TIMEOUT) 3502 *data_error = -ETIMEDOUT; 3503 else if (intmask & SDHCI_INT_ADMA_ERROR) 3504 *data_error = -EIO; 3505 else 3506 *data_error = 0; 3507 3508 /* Clear selected interrupts. 
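Only bits also set in host->cqe_ier are acked here; any leftover
bits are reported below as unexpected CQE interrupts.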
*/ 3509 mask = intmask & host->cqe_ier; 3510 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3511 3512 if (intmask & SDHCI_INT_BUS_POWER) 3513 pr_err("%s: Card is consuming too much power!\n", 3514 mmc_hostname(host->mmc)); 3515 3516 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR); 3517 if (intmask) { 3518 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3519 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n", 3520 mmc_hostname(host->mmc), intmask); 3521 sdhci_dumpregs(host); 3522 } 3523 3524 return true; 3525 } 3526 EXPORT_SYMBOL_GPL(sdhci_cqe_irq); 3527 3528 /*****************************************************************************\ 3529 * * 3530 * Device allocation/registration * 3531 * * 3532 \*****************************************************************************/ 3533 3534 struct sdhci_host *sdhci_alloc_host(struct device *dev, 3535 size_t priv_size) 3536 { 3537 struct mmc_host *mmc; 3538 struct sdhci_host *host; 3539 3540 WARN_ON(dev == NULL); 3541 3542 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 3543 if (!mmc) 3544 return ERR_PTR(-ENOMEM); 3545 3546 host = mmc_priv(mmc); 3547 host->mmc = mmc; 3548 host->mmc_host_ops = sdhci_ops; 3549 mmc->ops = &host->mmc_host_ops; 3550 3551 host->flags = SDHCI_SIGNALING_330; 3552 3553 host->cqe_ier = SDHCI_CQE_INT_MASK; 3554 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK; 3555 3556 host->tuning_delay = -1; 3557 host->tuning_loop_count = MAX_TUNING_LOOP; 3558 3559 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; 3560 3561 /* 3562 * The DMA table descriptor count is calculated as the maximum 3563 * number of segments times 2, to allow for an alignment 3564 * descriptor for each segment, plus 1 for a nop end descriptor. 3565 */ 3566 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; 3567 3568 return host; 3569 } 3570 3571 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 3572 3573 static int sdhci_set_dma_mask(struct sdhci_host *host) 3574 { 3575 struct mmc_host *mmc = host->mmc; 3576 struct device *dev = mmc_dev(mmc); 3577 int ret = -EINVAL; 3578 3579 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 3580 host->flags &= ~SDHCI_USE_64_BIT_DMA; 3581 3582 /* Try 64-bit mask if hardware is capable of it */ 3583 if (host->flags & SDHCI_USE_64_BIT_DMA) { 3584 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 3585 if (ret) { 3586 pr_warn("%s: Failed to set 64-bit DMA mask.\n", 3587 mmc_hostname(mmc)); 3588 host->flags &= ~SDHCI_USE_64_BIT_DMA; 3589 } 3590 } 3591 3592 /* 32-bit mask as default & fallback */ 3593 if (ret) { 3594 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 3595 if (ret) 3596 pr_warn("%s: Failed to set 32-bit DMA mask.\n", 3597 mmc_hostname(mmc)); 3598 } 3599 3600 return ret; 3601 } 3602 3603 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver, 3604 const u32 *caps, const u32 *caps1) 3605 { 3606 u16 v; 3607 u64 dt_caps_mask = 0; 3608 u64 dt_caps = 0; 3609 3610 if (host->read_caps) 3611 return; 3612 3613 host->read_caps = true; 3614 3615 if (debug_quirks) 3616 host->quirks = debug_quirks; 3617 3618 if (debug_quirks2) 3619 host->quirks2 = debug_quirks2; 3620 3621 sdhci_do_reset(host, SDHCI_RESET_ALL); 3622 3623 if (host->v4_mode) 3624 sdhci_do_enable_v4_mode(host); 3625 3626 of_property_read_u64(mmc_dev(host->mmc)->of_node, 3627 "sdhci-caps-mask", &dt_caps_mask); 3628 of_property_read_u64(mmc_dev(host->mmc)->of_node, 3629 "sdhci-caps", &dt_caps); 3630 3631 v = ver ? 
*ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3632 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3633
3634 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3635 return;
3636
3637 if (caps) {
3638 host->caps = *caps;
3639 } else {
3640 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3641 host->caps &= ~lower_32_bits(dt_caps_mask);
3642 host->caps |= lower_32_bits(dt_caps);
3643 }
3644
3645 if (host->version < SDHCI_SPEC_300)
3646 return;
3647
3648 if (caps1) {
3649 host->caps1 = *caps1;
3650 } else {
3651 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3652 host->caps1 &= ~upper_32_bits(dt_caps_mask);
3653 host->caps1 |= upper_32_bits(dt_caps);
3654 }
3655 }
3656 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
3657
3658 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3659 {
3660 struct mmc_host *mmc = host->mmc;
3661 unsigned int max_blocks;
3662 unsigned int bounce_size;
3663 int ret;
3664
3665 /*
3666 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3667 * has diminishing returns; this is probably because SD/MMC
3668 * cards are usually optimized to handle this size of requests.
3669 */
3670 bounce_size = SZ_64K;
3671 /*
3672 * Adjust downwards to maximum request size if this is less
3673 * than our segment size, else hammer down the maximum
3674 * request size to the maximum buffer size.
3675 */
3676 if (mmc->max_req_size < bounce_size)
3677 bounce_size = mmc->max_req_size;
3678 max_blocks = bounce_size / 512;
3679
3680 /*
3681 * When we only support one segment, we can get significant
3682 * speedups with the help of a bounce buffer to group scattered
3683 * reads/writes together.
3684 */
3685 host->bounce_buffer = devm_kmalloc(mmc->parent,
3686 bounce_size,
3687 GFP_KERNEL);
3688 if (!host->bounce_buffer) {
3689 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3690 mmc_hostname(mmc),
3691 bounce_size);
3692 /*
3693 * Returning early here makes sure we proceed with
3694 * mmc->max_segs == 1.
3695 */
3696 return;
3697 }
3698
3699 host->bounce_addr = dma_map_single(mmc->parent,
3700 host->bounce_buffer,
3701 bounce_size,
3702 DMA_BIDIRECTIONAL);
3703 ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3704 if (ret)
3705 /* Again fall back to max_segs == 1 */
3706 return;
3707 host->bounce_buffer_size = bounce_size;
3708
3709 /* Lie about this since we're bouncing */
3710 mmc->max_segs = max_blocks;
3711 mmc->max_seg_size = bounce_size;
3712 mmc->max_req_size = bounce_size;
3713
3714 pr_info("%s: bounce up to %u segments into one, max segment size %u bytes\n",
3715 mmc_hostname(mmc), max_blocks, bounce_size);
3716 }
3717
3718 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
3719 {
3720 /*
3721 * According to SD Host Controller spec v4.10, bit[27] of the
3722 * Capabilities Register, added in version 4.10, indicates
3723 * 64-bit System Address support for V4 mode.
3724 */
3725 if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
3726 return host->caps & SDHCI_CAN_64BIT_V4;
3727
3728 return host->caps & SDHCI_CAN_64BIT;
3729 }
3730
3731 int sdhci_setup_host(struct sdhci_host *host)
3732 {
3733 struct mmc_host *mmc;
3734 u32 max_current_caps;
3735 unsigned int ocr_avail;
3736 unsigned int override_timeout_clk;
3737 u32 max_clk;
3738 int ret;
3739
3740 WARN_ON(host == NULL);
3741 if (host == NULL)
3742 return -EINVAL;
3743
3744 mmc = host->mmc;
3745
3746 /*
3747 * If there are external regulators, get them.
Note this must be done 3748 * early before resetting the host and reading the capabilities so that 3749 * the host can take the appropriate action if regulators are not 3750 * available. 3751 */ 3752 ret = mmc_regulator_get_supply(mmc); 3753 if (ret) 3754 return ret; 3755 3756 DBG("Version: 0x%08x | Present: 0x%08x\n", 3757 sdhci_readw(host, SDHCI_HOST_VERSION), 3758 sdhci_readl(host, SDHCI_PRESENT_STATE)); 3759 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", 3760 sdhci_readl(host, SDHCI_CAPABILITIES), 3761 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 3762 3763 sdhci_read_caps(host); 3764 3765 override_timeout_clk = host->timeout_clk; 3766 3767 if (host->version > SDHCI_SPEC_420) { 3768 pr_err("%s: Unknown controller version (%d). You may experience problems.\n", 3769 mmc_hostname(mmc), host->version); 3770 } 3771 3772 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 3773 host->flags |= SDHCI_USE_SDMA; 3774 else if (!(host->caps & SDHCI_CAN_DO_SDMA)) 3775 DBG("Controller doesn't have SDMA capability\n"); 3776 else 3777 host->flags |= SDHCI_USE_SDMA; 3778 3779 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 3780 (host->flags & SDHCI_USE_SDMA)) { 3781 DBG("Disabling DMA as it is marked broken\n"); 3782 host->flags &= ~SDHCI_USE_SDMA; 3783 } 3784 3785 if ((host->version >= SDHCI_SPEC_200) && 3786 (host->caps & SDHCI_CAN_DO_ADMA2)) 3787 host->flags |= SDHCI_USE_ADMA; 3788 3789 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 3790 (host->flags & SDHCI_USE_ADMA)) { 3791 DBG("Disabling ADMA as it is marked broken\n"); 3792 host->flags &= ~SDHCI_USE_ADMA; 3793 } 3794 3795 if (sdhci_can_64bit_dma(host)) 3796 host->flags |= SDHCI_USE_64_BIT_DMA; 3797 3798 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3799 if (host->ops->set_dma_mask) 3800 ret = host->ops->set_dma_mask(host); 3801 else 3802 ret = sdhci_set_dma_mask(host); 3803 3804 if (!ret && host->ops->enable_dma) 3805 ret = host->ops->enable_dma(host); 3806 3807 if (ret) { 3808 pr_warn("%s: No suitable DMA available - falling back to PIO\n", 3809 mmc_hostname(mmc)); 3810 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 3811 3812 ret = 0; 3813 } 3814 } 3815 3816 /* SDMA does not support 64-bit DMA if v4 mode not set */ 3817 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode) 3818 host->flags &= ~SDHCI_USE_SDMA; 3819 3820 if (host->flags & SDHCI_USE_ADMA) { 3821 dma_addr_t dma; 3822 void *buf; 3823 3824 if (host->flags & SDHCI_USE_64_BIT_DMA) { 3825 host->adma_table_sz = host->adma_table_cnt * 3826 SDHCI_ADMA2_64_DESC_SZ(host); 3827 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); 3828 } else { 3829 host->adma_table_sz = host->adma_table_cnt * 3830 SDHCI_ADMA2_32_DESC_SZ; 3831 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; 3832 } 3833 3834 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; 3835 /* 3836 * Use zalloc to zero the reserved high 32-bits of 128-bit 3837 * descriptors so that they never need to be written. 
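* The single allocation below holds the align buffer first, with
* the ADMA descriptor table placed immediately after it.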
3838 */ 3839 buf = dma_alloc_coherent(mmc_dev(mmc), 3840 host->align_buffer_sz + host->adma_table_sz, 3841 &dma, GFP_KERNEL); 3842 if (!buf) { 3843 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 3844 mmc_hostname(mmc)); 3845 host->flags &= ~SDHCI_USE_ADMA; 3846 } else if ((dma + host->align_buffer_sz) & 3847 (SDHCI_ADMA2_DESC_ALIGN - 1)) { 3848 pr_warn("%s: unable to allocate aligned ADMA descriptor\n", 3849 mmc_hostname(mmc)); 3850 host->flags &= ~SDHCI_USE_ADMA; 3851 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 3852 host->adma_table_sz, buf, dma); 3853 } else { 3854 host->align_buffer = buf; 3855 host->align_addr = dma; 3856 3857 host->adma_table = buf + host->align_buffer_sz; 3858 host->adma_addr = dma + host->align_buffer_sz; 3859 } 3860 } 3861 3862 /* 3863 * If we use DMA, then it's up to the caller to set the DMA 3864 * mask, but PIO does not need the hw shim so we set a new 3865 * mask here in that case. 3866 */ 3867 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { 3868 host->dma_mask = DMA_BIT_MASK(64); 3869 mmc_dev(mmc)->dma_mask = &host->dma_mask; 3870 } 3871 3872 if (host->version >= SDHCI_SPEC_300) 3873 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK) 3874 >> SDHCI_CLOCK_BASE_SHIFT; 3875 else 3876 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK) 3877 >> SDHCI_CLOCK_BASE_SHIFT; 3878 3879 host->max_clk *= 1000000; 3880 if (host->max_clk == 0 || host->quirks & 3881 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 3882 if (!host->ops->get_max_clock) { 3883 pr_err("%s: Hardware doesn't specify base clock frequency.\n", 3884 mmc_hostname(mmc)); 3885 ret = -ENODEV; 3886 goto undma; 3887 } 3888 host->max_clk = host->ops->get_max_clock(host); 3889 } 3890 3891 /* 3892 * In case of Host Controller v3.00, find out whether clock 3893 * multiplier is supported. 3894 */ 3895 host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >> 3896 SDHCI_CLOCK_MUL_SHIFT; 3897 3898 /* 3899 * In case the value in Clock Multiplier is 0, then programmable 3900 * clock mode is not supported, otherwise the actual clock 3901 * multiplier is one more than the value of Clock Multiplier 3902 * in the Capabilities Register. 3903 */ 3904 if (host->clk_mul) 3905 host->clk_mul += 1; 3906 3907 /* 3908 * Set host parameters. 3909 */ 3910 max_clk = host->max_clk; 3911 3912 if (host->ops->get_min_clock) 3913 mmc->f_min = host->ops->get_min_clock(host); 3914 else if (host->version >= SDHCI_SPEC_300) { 3915 if (host->clk_mul) { 3916 mmc->f_min = (host->max_clk * host->clk_mul) / 1024; 3917 max_clk = host->max_clk * host->clk_mul; 3918 } else 3919 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 3920 } else 3921 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 3922 3923 if (!mmc->f_max || mmc->f_max > max_clk) 3924 mmc->f_max = max_clk; 3925 3926 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 3927 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >> 3928 SDHCI_TIMEOUT_CLK_SHIFT; 3929 3930 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) 3931 host->timeout_clk *= 1000; 3932 3933 if (host->timeout_clk == 0) { 3934 if (!host->ops->get_timeout_clock) { 3935 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n", 3936 mmc_hostname(mmc)); 3937 ret = -ENODEV; 3938 goto undma; 3939 } 3940 3941 host->timeout_clk = 3942 DIV_ROUND_UP(host->ops->get_timeout_clock(host), 3943 1000); 3944 } 3945 3946 if (override_timeout_clk) 3947 host->timeout_clk = override_timeout_clk; 3948 3949 mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 
3950 host->ops->get_max_timeout_count(host) : 1 << 27; 3951 mmc->max_busy_timeout /= host->timeout_clk; 3952 } 3953 3954 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT && 3955 !host->ops->get_max_timeout_count) 3956 mmc->max_busy_timeout = 0; 3957 3958 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; 3959 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 3960 3961 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 3962 host->flags |= SDHCI_AUTO_CMD12; 3963 3964 /* 3965 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO. 3966 * For v4 mode, SDMA may use Auto-CMD23 as well. 3967 */ 3968 if ((host->version >= SDHCI_SPEC_300) && 3969 ((host->flags & SDHCI_USE_ADMA) || 3970 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) && 3971 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { 3972 host->flags |= SDHCI_AUTO_CMD23; 3973 DBG("Auto-CMD23 available\n"); 3974 } else { 3975 DBG("Auto-CMD23 unavailable\n"); 3976 } 3977 3978 /* 3979 * A controller may support 8-bit width, but the board itself 3980 * might not have the pins brought out. Boards that support 3981 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 3982 * their platform code before calling sdhci_add_host(), and we 3983 * won't assume 8-bit width for hosts without that CAP. 3984 */ 3985 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 3986 mmc->caps |= MMC_CAP_4_BIT_DATA; 3987 3988 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) 3989 mmc->caps &= ~MMC_CAP_CMD23; 3990 3991 if (host->caps & SDHCI_CAN_DO_HISPD) 3992 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 3993 3994 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3995 mmc_card_is_removable(mmc) && 3996 mmc_gpio_get_cd(host->mmc) < 0) 3997 mmc->caps |= MMC_CAP_NEEDS_POLL; 3998 3999 if (!IS_ERR(mmc->supply.vqmmc)) { 4000 ret = regulator_enable(mmc->supply.vqmmc); 4001 4002 /* If vqmmc provides no 1.8V signalling, then there's no UHS */ 4003 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, 4004 1950000)) 4005 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | 4006 SDHCI_SUPPORT_SDR50 | 4007 SDHCI_SUPPORT_DDR50); 4008 4009 /* In eMMC case vqmmc might be a fixed 1.8V regulator */ 4010 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000, 4011 3600000)) 4012 host->flags &= ~SDHCI_SIGNALING_330; 4013 4014 if (ret) { 4015 pr_warn("%s: Failed to enable vqmmc regulator: %d\n", 4016 mmc_hostname(mmc), ret); 4017 mmc->supply.vqmmc = ERR_PTR(-EINVAL); 4018 } 4019 } 4020 4021 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) { 4022 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 4023 SDHCI_SUPPORT_DDR50); 4024 /* 4025 * The SDHCI controller in a SoC might support HS200/HS400 4026 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property), 4027 * but if the board is modeled such that the IO lines are not 4028 * connected to 1.8v then HS200/HS400 cannot be supported. 4029 * Disable HS200/HS400 if the board does not have 1.8v connected 4030 * to the IO lines. (Applicable for other modes in 1.8v) 4031 */ 4032 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES); 4033 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS); 4034 } 4035 4036 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. 
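SDR12 and SDR25 are mandatory for any UHS-I card, so they can
safely be advertised whenever a faster UHS mode is present.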
	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
			    SDHCI_RETUNING_MODE_SHIFT;

	ocr_avail = 0;
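
	/*
	 * Worked example (illustrative numbers): a vmmc regulator limited to
	 * 800000 uA is 800 mA, which encodes as 800 / 4 = 200 in the Maximum
	 * Current register fields below; decoding multiplies by
	 * SDHCI_MAX_CURRENT_MULTIPLIER (4 mA units) to recover 800 mA.
	 */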
	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * value is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
		if (swiotlb_max_segment()) {
			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
						IO_TLB_SEGSIZE;
			mmc->max_req_size = min(mmc->max_req_size,
						max_req_size);
		}
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}
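
	/*
	 * Worked example for the SDMA bounce case above (constants as of this
	 * writing): swiotlb slots are 1 << IO_TLB_SHIFT = 2 KiB each and a
	 * single mapping may span at most IO_TLB_SEGSIZE = 128 slots, so
	 * max_req_size is capped at 128 * 2 KiB = 256 KiB when a bounce
	 * buffer is in use.
	 */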
	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	if (mmc->max_segs == 1)
		/* This may alter mmc->*_blk_* parameters */
		sdhci_allocate_bounce_buffer(host);

	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
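
/*
 * Sketch of the intended calling sequence (illustrative; the fixup step in
 * the middle is hypothetical): a platform driver that must patch up
 * capabilities between probing the hardware and registering the host does
 *
 *	ret = sdhci_setup_host(host);
 *	if (ret)
 *		return ret;
 *	...fix up host->mmc->caps, clocks, etc...
 *	ret = __sdhci_add_host(host);
 *	if (ret)
 *		sdhci_cleanup_host(host);
 *	return ret;
 *
 * sdhci_add_host() below combines exactly these two steps for the simple
 * case.
 */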
"DMA" : "PIO"); 4326 4327 sdhci_enable_card_detection(host); 4328 4329 return 0; 4330 4331 unled: 4332 sdhci_led_unregister(host); 4333 unirq: 4334 sdhci_do_reset(host, SDHCI_RESET_ALL); 4335 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4336 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4337 free_irq(host->irq, host); 4338 unwq: 4339 destroy_workqueue(host->complete_wq); 4340 4341 return ret; 4342 } 4343 EXPORT_SYMBOL_GPL(__sdhci_add_host); 4344 4345 int sdhci_add_host(struct sdhci_host *host) 4346 { 4347 int ret; 4348 4349 ret = sdhci_setup_host(host); 4350 if (ret) 4351 return ret; 4352 4353 ret = __sdhci_add_host(host); 4354 if (ret) 4355 goto cleanup; 4356 4357 return 0; 4358 4359 cleanup: 4360 sdhci_cleanup_host(host); 4361 4362 return ret; 4363 } 4364 EXPORT_SYMBOL_GPL(sdhci_add_host); 4365 4366 void sdhci_remove_host(struct sdhci_host *host, int dead) 4367 { 4368 struct mmc_host *mmc = host->mmc; 4369 unsigned long flags; 4370 4371 if (dead) { 4372 spin_lock_irqsave(&host->lock, flags); 4373 4374 host->flags |= SDHCI_DEVICE_DEAD; 4375 4376 if (sdhci_has_requests(host)) { 4377 pr_err("%s: Controller removed during " 4378 " transfer!\n", mmc_hostname(mmc)); 4379 sdhci_error_out_mrqs(host, -ENOMEDIUM); 4380 } 4381 4382 spin_unlock_irqrestore(&host->lock, flags); 4383 } 4384 4385 sdhci_disable_card_detection(host); 4386 4387 mmc_remove_host(mmc); 4388 4389 sdhci_led_unregister(host); 4390 4391 if (!dead) 4392 sdhci_do_reset(host, SDHCI_RESET_ALL); 4393 4394 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4395 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4396 free_irq(host->irq, host); 4397 4398 del_timer_sync(&host->timer); 4399 del_timer_sync(&host->data_timer); 4400 4401 destroy_workqueue(host->complete_wq); 4402 4403 if (!IS_ERR(mmc->supply.vqmmc)) 4404 regulator_disable(mmc->supply.vqmmc); 4405 4406 if (host->align_buffer) 4407 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4408 host->adma_table_sz, host->align_buffer, 4409 host->align_addr); 4410 4411 host->adma_table = NULL; 4412 host->align_buffer = NULL; 4413 } 4414 4415 EXPORT_SYMBOL_GPL(sdhci_remove_host); 4416 4417 void sdhci_free_host(struct sdhci_host *host) 4418 { 4419 mmc_free_host(host->mmc); 4420 } 4421 4422 EXPORT_SYMBOL_GPL(sdhci_free_host); 4423 4424 /*****************************************************************************\ 4425 * * 4426 * Driver init/exit * 4427 * * 4428 \*****************************************************************************/ 4429 4430 static int __init sdhci_drv_init(void) 4431 { 4432 pr_info(DRIVER_NAME 4433 ": Secure Digital Host Controller Interface driver\n"); 4434 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 4435 4436 return 0; 4437 } 4438 4439 static void __exit sdhci_drv_exit(void) 4440 { 4441 } 4442 4443 module_init(sdhci_drv_init); 4444 module_exit(sdhci_drv_exit); 4445 4446 module_param(debug_quirks, uint, 0444); 4447 module_param(debug_quirks2, uint, 0444); 4448 4449 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 4450 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 4451 MODULE_LICENSE("GPL"); 4452 4453 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 4454 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 4455