// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * Thanks to the following companies for their support:
 *
 *   - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
	u16 ctrl2;

	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ctrl2 & SDHCI_CTRL_V4_MODE)
		return;

	ctrl2 |= SDHCI_CTRL_V4_MODE;
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}

/*
 * This can be called before sdhci_add_host() by a vendor's host controller
 * driver to enable v4 mode if supported.
 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
			       mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
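
/*
 * Wrapper around the ->reset() host op: skips the reset when the controller
 * requires a card to be present (SDHCI_QUIRK_NO_CARD_NO_RESET) and, after a
 * full reset, re-enables DMA and clears the cached preset-enable state.
 */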
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_config_dma(struct sdhci_host *host)
{
	u8 ctrl;
	u16 ctrl2;

	if (host->version < SDHCI_SPEC_200)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (!(host->flags & SDHCI_REQ_USE_DMA))
		goto out;

	/* Note if DMA Select is zero then SDMA is selected */
	if (host->flags & SDHCI_USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;

	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		/*
		 * If v4 mode, all supported DMA can be 64-bit addressing if
		 * controller supports 64-bit system address, otherwise only
		 * ADMA can support 64-bit addressing.
		 */
		if (host->v4_mode) {
			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
		} else if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
			 * set SDHCI_CTRL_ADMA64.
			 */
			ctrl |= SDHCI_CTRL_ADMA64;
		}
	}

out:
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);

	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);

	/*
	 * A change to the card detect bits indicates a change in present
	 * state, refer to sdhci_set_card_detection(). A card detect interrupt
	 * might have been missed while the host controller was being reset,
	 * so trigger a rescan to check.
	 */
	if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return 0;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
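
/*
 * Map (or reuse a pre-mapped) scatterlist for DMA. Returns the number of
 * mapped segments, or a negative error code. When a bounce buffer is in use,
 * write data is copied into it here and a single-segment mapping is reported.
 */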
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buffer,
					  length);
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(host->mmc->parent,
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
			   dma_addr_t addr, int len, unsigned int cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = *desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));

	*desc += host->desc_sz;
}
EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);

static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
					   void **desc, dma_addr_t addr,
					   int len, unsigned int cmd)
{
	if (host->ops->adma_write_desc)
		host->ops->adma_write_desc(host, desc, addr, len, cmd);
	else
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
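
/*
 * Build the ADMA2 descriptor table for a DMA-mapped scatterlist. Segments
 * that are not 32-bit aligned are partially bounced through the align buffer
 * before the remainder is described directly.
 */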
static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			__sdhci_adma_write_desc(host, &desc, align_addr,
						offset, ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* tran, valid */
		if (len)
			__sdhci_adma_write_desc(host, &desc, addr, len,
						ADMA2_TRAN_VALID);

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
}

static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	if (host->v4_mode)
		sdhci_set_adma_addr(host, addr);
	else
		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
}

static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = host->mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data;
	unsigned target_timeout, current_timeout;

	*too_big = true;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified command, assume max */
	if (cmd == NULL)
		return 0xE;

	data = cmd->data;
	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
			DBG("Too large timeout 0x%x requested for CMD%d!\n",
			    count, cmd->opcode);
		count = 0xE;
	} else {
		*too_big = false;
	}

	return count;
}

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
	else
		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		bool too_big = false;

		count = sdhci_calc_timeout(host, cmd, &too_big);

		if (too_big &&
		    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
			sdhci_calc_sw_timeout(host, cmd);
			sdhci_set_data_timeout_irq(host, false);
		} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
			sdhci_set_data_timeout_irq(host, true);
		}

		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	host->data_timeout = 0;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);
			sdhci_set_adma_addr(host, host->adma_addr);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
		}
	}

	sdhci_config_dma(host);

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);

	/*
	 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
	 * can be supported, in that case 16-bit block count register must be 0.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
	} else {
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
	}
}

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 u16 *mode)
{
	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
			 (cmd->opcode != SD_IO_RW_EXTENDED);
	bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
	u16 ctrl2;

	/*
	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
	 * Select' is recommended rather than use of 'Auto CMD12
	 * Enable' or 'Auto CMD23 Enable'.
	 */
	if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
		*mode |= SDHCI_TRNS_AUTO_SEL;

		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (use_cmd23)
			ctrl2 |= SDHCI_CMD23_ENABLE;
		else
			ctrl2 &= ~SDHCI_CMD23_ENABLE;
		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);

		return;
	}

	/*
	 * If we are sending CMD23, CMD12 never gets sent
	 * on successful completion (so no Auto-CMD12).
	 */
	if (use_cmd12)
		*mode |= SDHCI_TRNS_AUTO_CMD12;
	else if (use_cmd23)
		*mode |= SDHCI_TRNS_AUTO_CMD23;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
		    SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			/* must not clear SDHCI_TRANSFER_MODE when tuning */
			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				     SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		sdhci_auto_cmd_select(host, cmd, &mode);
		if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	sdhci_del_timer(host, mrq);

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	__sdhci_finish_mrq(host, mrq);

	queue_work(host->complete_wq, &host->complete_work);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	/*
	 * The controller needs a reset of internal state machines upon error
	 * conditions.
	 */
	if (data->error) {
		if (!host->cmd || host->cmd == data_cmd)
			sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {
		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			__sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		__sdhci_finish_mrq(host, data->mrq);
	}
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
		       mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			__sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}
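
/*
 * Work out the divider bits for SDHCI_CLOCK_CONTROL needed to get as close
 * as possible to the requested clock, using preset values or programmable
 * clock mode when available. The resulting frequency is reported through
 * *actual_clock.
 */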
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
			    (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);

void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 150 ms */
	timeout = ktime_add_ms(ktime_get(), 150);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		if (clk & SDHCI_CLOCK_INT_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
		clk |= SDHCI_CLOCK_PLL_EN;
		clk &= ~SDHCI_CLOCK_INT_STABLE;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		/* Wait max 150 ms */
		timeout = ktime_add_ms(ktime_get(), 150);
		while (1) {
			bool timedout = ktime_after(ktime_get(), timeout);

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			if (clk & SDHCI_CLOCK_INT_STABLE)
				break;
			if (timedout) {
				pr_err("%s: PLL clock never stabilised.\n",
				       mmc_hostname(host->mmc));
				sdhci_dumpregs(host);
				return;
			}
			udelay(10);
		}
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
		/*
		 * Without a regulator, SDHCI does not support 2.0v
		 * so we only get here if the driver deliberately
		 * added the 2.0v range to ocr_avail. Map it to 1.8v
		 * for the purpose of turning on the power.
		 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10ms delay before they
		 * can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_request);

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
	    (ios->power_mode == MMC_POWER_UP) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		if (ios->timing == MMC_TIMING_SD_HS ||
		    ios->timing == MMC_TIMING_MMC_HS ||
		    ios->timing == MMC_TIMING_MMC_HS400 ||
		    ios->timing == MMC_TIMING_MMC_HS200 ||
		    ios->timing == MMC_TIMING_MMC_DDR52 ||
		    ios->timing == MMC_TIMING_UHS_SDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR104 ||
		    ios->timing == MMC_TIMING_UHS_DDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
		    ((ios->timing == MMC_TIMING_UHS_SDR12) ||
		     (ios->timing == MMC_TIMING_UHS_SDR25) ||
		     (ios->timing == MMC_TIMING_UHS_SDR50) ||
		     (ios->timing == MMC_TIMING_UHS_SDR104) ||
		     (ios->timing == MMC_TIMING_UHS_DDR50) ||
		     (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot gpio detect, if defined it takes precedence
	 * over built-in controller functionality
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else if (mmc_can_gpio_ro(host->mmc))
		is_readonly = mmc_gpio_get_ro(host->mmc);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}
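
/*
 * With SDHCI_QUIRK_UNSTABLE_RO_DETECT the write-protect state is sampled
 * several times and the card is reported read-only only if more than half
 * of the samples say so.
 */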
2102 !is_readonly : is_readonly; 2103 } 2104 2105 #define SAMPLE_COUNT 5 2106 2107 static int sdhci_get_ro(struct mmc_host *mmc) 2108 { 2109 struct sdhci_host *host = mmc_priv(mmc); 2110 int i, ro_count; 2111 2112 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) 2113 return sdhci_check_ro(host); 2114 2115 ro_count = 0; 2116 for (i = 0; i < SAMPLE_COUNT; i++) { 2117 if (sdhci_check_ro(host)) { 2118 if (++ro_count > SAMPLE_COUNT / 2) 2119 return 1; 2120 } 2121 msleep(30); 2122 } 2123 return 0; 2124 } 2125 2126 static void sdhci_hw_reset(struct mmc_host *mmc) 2127 { 2128 struct sdhci_host *host = mmc_priv(mmc); 2129 2130 if (host->ops && host->ops->hw_reset) 2131 host->ops->hw_reset(host); 2132 } 2133 2134 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 2135 { 2136 if (!(host->flags & SDHCI_DEVICE_DEAD)) { 2137 if (enable) 2138 host->ier |= SDHCI_INT_CARD_INT; 2139 else 2140 host->ier &= ~SDHCI_INT_CARD_INT; 2141 2142 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2143 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2144 } 2145 } 2146 2147 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 2148 { 2149 struct sdhci_host *host = mmc_priv(mmc); 2150 unsigned long flags; 2151 2152 if (enable) 2153 pm_runtime_get_noresume(host->mmc->parent); 2154 2155 spin_lock_irqsave(&host->lock, flags); 2156 sdhci_enable_sdio_irq_nolock(host, enable); 2157 spin_unlock_irqrestore(&host->lock, flags); 2158 2159 if (!enable) 2160 pm_runtime_put_noidle(host->mmc->parent); 2161 } 2162 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2163 2164 static void sdhci_ack_sdio_irq(struct mmc_host *mmc) 2165 { 2166 struct sdhci_host *host = mmc_priv(mmc); 2167 unsigned long flags; 2168 2169 spin_lock_irqsave(&host->lock, flags); 2170 sdhci_enable_sdio_irq_nolock(host, true); 2171 spin_unlock_irqrestore(&host->lock, flags); 2172 } 2173 2174 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 2175 struct mmc_ios *ios) 2176 { 2177 struct sdhci_host *host = mmc_priv(mmc); 2178 u16 ctrl; 2179 int ret; 2180 2181 /* 2182 * Signal Voltage Switching is only applicable for Host Controllers 2183 * v3.00 and above. 
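 * The switch is carried out by programming the 1.8V Signal Enable bit
 * in the Host Control 2 register and, where one is available, the
 * vqmmc regulator; the bit is read back after a short delay to check
 * that the new level has taken effect.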
2184 */ 2185 if (host->version < SDHCI_SPEC_300) 2186 return 0; 2187 2188 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2189 2190 switch (ios->signal_voltage) { 2191 case MMC_SIGNAL_VOLTAGE_330: 2192 if (!(host->flags & SDHCI_SIGNALING_330)) 2193 return -EINVAL; 2194 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 2195 ctrl &= ~SDHCI_CTRL_VDD_180; 2196 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2197 2198 if (!IS_ERR(mmc->supply.vqmmc)) { 2199 ret = mmc_regulator_set_vqmmc(mmc, ios); 2200 if (ret) { 2201 pr_warn("%s: Switching to 3.3V signalling voltage failed\n", 2202 mmc_hostname(mmc)); 2203 return -EIO; 2204 } 2205 } 2206 /* Wait for 5ms */ 2207 usleep_range(5000, 5500); 2208 2209 /* 3.3V regulator output should be stable within 5 ms */ 2210 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2211 if (!(ctrl & SDHCI_CTRL_VDD_180)) 2212 return 0; 2213 2214 pr_warn("%s: 3.3V regulator output did not become stable\n", 2215 mmc_hostname(mmc)); 2216 2217 return -EAGAIN; 2218 case MMC_SIGNAL_VOLTAGE_180: 2219 if (!(host->flags & SDHCI_SIGNALING_180)) 2220 return -EINVAL; 2221 if (!IS_ERR(mmc->supply.vqmmc)) { 2222 ret = mmc_regulator_set_vqmmc(mmc, ios); 2223 if (ret) { 2224 pr_warn("%s: Switching to 1.8V signalling voltage failed\n", 2225 mmc_hostname(mmc)); 2226 return -EIO; 2227 } 2228 } 2229 2230 /* 2231 * Enable 1.8V Signal Enable in the Host Control2 2232 * register 2233 */ 2234 ctrl |= SDHCI_CTRL_VDD_180; 2235 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2236 2237 /* Some controller need to do more when switching */ 2238 if (host->ops->voltage_switch) 2239 host->ops->voltage_switch(host); 2240 2241 /* 1.8V regulator output should be stable within 5 ms */ 2242 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2243 if (ctrl & SDHCI_CTRL_VDD_180) 2244 return 0; 2245 2246 pr_warn("%s: 1.8V regulator output did not become stable\n", 2247 mmc_hostname(mmc)); 2248 2249 return -EAGAIN; 2250 case MMC_SIGNAL_VOLTAGE_120: 2251 if (!(host->flags & SDHCI_SIGNALING_120)) 2252 return -EINVAL; 2253 if (!IS_ERR(mmc->supply.vqmmc)) { 2254 ret = mmc_regulator_set_vqmmc(mmc, ios); 2255 if (ret) { 2256 pr_warn("%s: Switching to 1.2V signalling voltage failed\n", 2257 mmc_hostname(mmc)); 2258 return -EIO; 2259 } 2260 } 2261 return 0; 2262 default: 2263 /* No signal voltage switch required */ 2264 return 0; 2265 } 2266 } 2267 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch); 2268 2269 static int sdhci_card_busy(struct mmc_host *mmc) 2270 { 2271 struct sdhci_host *host = mmc_priv(mmc); 2272 u32 present_state; 2273 2274 /* Check whether DAT[0] is 0 */ 2275 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 2276 2277 return !(present_state & SDHCI_DATA_0_LVL_MASK); 2278 } 2279 2280 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 2281 { 2282 struct sdhci_host *host = mmc_priv(mmc); 2283 unsigned long flags; 2284 2285 spin_lock_irqsave(&host->lock, flags); 2286 host->flags |= SDHCI_HS400_TUNING; 2287 spin_unlock_irqrestore(&host->lock, flags); 2288 2289 return 0; 2290 } 2291 2292 void sdhci_start_tuning(struct sdhci_host *host) 2293 { 2294 u16 ctrl; 2295 2296 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2297 ctrl |= SDHCI_CTRL_EXEC_TUNING; 2298 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND) 2299 ctrl |= SDHCI_CTRL_TUNED_CLK; 2300 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2301 2302 /* 2303 * As per the Host Controller spec v3.00, tuning command 2304 * generates Buffer Read Ready interrupt, so enable that. 
2305 * 2306 * Note: The spec clearly says that when tuning sequence 2307 * is being performed, the controller does not generate 2308 * interrupts other than Buffer Read Ready interrupt. But 2309 * to make sure we don't hit a controller bug, we _only_ 2310 * enable Buffer Read Ready interrupt here. 2311 */ 2312 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 2313 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 2314 } 2315 EXPORT_SYMBOL_GPL(sdhci_start_tuning); 2316 2317 void sdhci_end_tuning(struct sdhci_host *host) 2318 { 2319 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2320 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2321 } 2322 EXPORT_SYMBOL_GPL(sdhci_end_tuning); 2323 2324 void sdhci_reset_tuning(struct sdhci_host *host) 2325 { 2326 u16 ctrl; 2327 2328 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2329 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2330 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2331 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2332 } 2333 EXPORT_SYMBOL_GPL(sdhci_reset_tuning); 2334 2335 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) 2336 { 2337 sdhci_reset_tuning(host); 2338 2339 sdhci_do_reset(host, SDHCI_RESET_CMD); 2340 sdhci_do_reset(host, SDHCI_RESET_DATA); 2341 2342 sdhci_end_tuning(host); 2343 2344 mmc_abort_tuning(host->mmc, opcode); 2345 } 2346 EXPORT_SYMBOL_GPL(sdhci_abort_tuning); 2347 2348 /* 2349 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI 2350 * tuning command does not have a data payload (or rather the hardware does it 2351 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command 2352 * interrupt setup is different to other commands and there is no timeout 2353 * interrupt so special handling is needed. 2354 */ 2355 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) 2356 { 2357 struct mmc_host *mmc = host->mmc; 2358 struct mmc_command cmd = {}; 2359 struct mmc_request mrq = {}; 2360 unsigned long flags; 2361 u32 b = host->sdma_boundary; 2362 2363 spin_lock_irqsave(&host->lock, flags); 2364 2365 cmd.opcode = opcode; 2366 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2367 cmd.mrq = &mrq; 2368 2369 mrq.cmd = &cmd; 2370 /* 2371 * In response to CMD19, the card sends 64 bytes of tuning 2372 * block to the Host Controller. So we set the block size 2373 * to 64 here. 2374 */ 2375 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && 2376 mmc->ios.bus_width == MMC_BUS_WIDTH_8) 2377 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); 2378 else 2379 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); 2380 2381 /* 2382 * The tuning block is sent by the card to the host controller. 2383 * So we set the TRNS_READ bit in the Transfer Mode register. 2384 * This also takes care of setting DMA Enable and Multi Block 2385 * Select in the same register to 0. 2386 */ 2387 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 2388 2389 sdhci_send_command(host, &cmd); 2390 2391 host->cmd = NULL; 2392 2393 sdhci_del_timer(host, &mrq); 2394 2395 host->tuning_done = 0; 2396 2397 spin_unlock_irqrestore(&host->lock, flags); 2398 2399 /* Wait for Buffer Read Ready interrupt */ 2400 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2401 msecs_to_jiffies(50)); 2402 2403 } 2404 EXPORT_SYMBOL_GPL(sdhci_send_tuning); 2405 2406 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2407 { 2408 int i; 2409 2410 /* 2411 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number 2412 * of loops reaches tuning loop count. 
2413 */ 2414 for (i = 0; i < host->tuning_loop_count; i++) { 2415 u16 ctrl; 2416 2417 sdhci_send_tuning(host, opcode); 2418 2419 if (!host->tuning_done) { 2420 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", 2421 mmc_hostname(host->mmc)); 2422 sdhci_abort_tuning(host, opcode); 2423 return -ETIMEDOUT; 2424 } 2425 2426 /* Spec does not require a delay between tuning cycles */ 2427 if (host->tuning_delay > 0) 2428 mdelay(host->tuning_delay); 2429 2430 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2431 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { 2432 if (ctrl & SDHCI_CTRL_TUNED_CLK) 2433 return 0; /* Success! */ 2434 break; 2435 } 2436 2437 } 2438 2439 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2440 mmc_hostname(host->mmc)); 2441 sdhci_reset_tuning(host); 2442 return -EAGAIN; 2443 } 2444 2445 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2446 { 2447 struct sdhci_host *host = mmc_priv(mmc); 2448 int err = 0; 2449 unsigned int tuning_count = 0; 2450 bool hs400_tuning; 2451 2452 hs400_tuning = host->flags & SDHCI_HS400_TUNING; 2453 2454 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 2455 tuning_count = host->tuning_count; 2456 2457 /* 2458 * The Host Controller needs tuning in case of SDR104 and DDR50 2459 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in 2460 * the Capabilities register. 2461 * If the Host Controller supports the HS200 mode then the 2462 * tuning function has to be executed. 2463 */ 2464 switch (host->timing) { 2465 /* HS400 tuning is done in HS200 mode */ 2466 case MMC_TIMING_MMC_HS400: 2467 err = -EINVAL; 2468 goto out; 2469 2470 case MMC_TIMING_MMC_HS200: 2471 /* 2472 * Periodic re-tuning for HS400 is not expected to be needed, so 2473 * disable it here. 2474 */ 2475 if (hs400_tuning) 2476 tuning_count = 0; 2477 break; 2478 2479 case MMC_TIMING_UHS_SDR104: 2480 case MMC_TIMING_UHS_DDR50: 2481 break; 2482 2483 case MMC_TIMING_UHS_SDR50: 2484 if (host->flags & SDHCI_SDR50_NEEDS_TUNING) 2485 break; 2486 /* FALLTHROUGH */ 2487 2488 default: 2489 goto out; 2490 } 2491 2492 if (host->ops->platform_execute_tuning) { 2493 err = host->ops->platform_execute_tuning(host, opcode); 2494 goto out; 2495 } 2496 2497 host->mmc->retune_period = tuning_count; 2498 2499 if (host->tuning_delay < 0) 2500 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; 2501 2502 sdhci_start_tuning(host); 2503 2504 host->tuning_err = __sdhci_execute_tuning(host, opcode); 2505 2506 sdhci_end_tuning(host); 2507 out: 2508 host->flags &= ~SDHCI_HS400_TUNING; 2509 2510 return err; 2511 } 2512 EXPORT_SYMBOL_GPL(sdhci_execute_tuning); 2513 2514 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2515 { 2516 /* Host Controller v3.00 defines preset value registers */ 2517 if (host->version < SDHCI_SPEC_300) 2518 return; 2519 2520 /* 2521 * We only enable or disable Preset Value if they are not already 2522 * enabled or disabled respectively. Otherwise, we bail out. 
2523 */ 2524 if (host->preset_enabled != enable) { 2525 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2526 2527 if (enable) 2528 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2529 else 2530 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2531 2532 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2533 2534 if (enable) 2535 host->flags |= SDHCI_PV_ENABLED; 2536 else 2537 host->flags &= ~SDHCI_PV_ENABLED; 2538 2539 host->preset_enabled = enable; 2540 } 2541 } 2542 2543 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 2544 int err) 2545 { 2546 struct sdhci_host *host = mmc_priv(mmc); 2547 struct mmc_data *data = mrq->data; 2548 2549 if (data->host_cookie != COOKIE_UNMAPPED) 2550 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 2551 mmc_get_dma_dir(data)); 2552 2553 data->host_cookie = COOKIE_UNMAPPED; 2554 } 2555 2556 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 2557 { 2558 struct sdhci_host *host = mmc_priv(mmc); 2559 2560 mrq->data->host_cookie = COOKIE_UNMAPPED; 2561 2562 /* 2563 * No pre-mapping in the pre hook if we're using the bounce buffer, 2564 * for that we would need two bounce buffers since one buffer is 2565 * in flight when this is getting called. 2566 */ 2567 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) 2568 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 2569 } 2570 2571 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) 2572 { 2573 if (host->data_cmd) { 2574 host->data_cmd->error = err; 2575 sdhci_finish_mrq(host, host->data_cmd->mrq); 2576 } 2577 2578 if (host->cmd) { 2579 host->cmd->error = err; 2580 sdhci_finish_mrq(host, host->cmd->mrq); 2581 } 2582 } 2583 2584 static void sdhci_card_event(struct mmc_host *mmc) 2585 { 2586 struct sdhci_host *host = mmc_priv(mmc); 2587 unsigned long flags; 2588 int present; 2589 2590 /* First check if client has provided their own card event */ 2591 if (host->ops->card_event) 2592 host->ops->card_event(host); 2593 2594 present = mmc->ops->get_cd(mmc); 2595 2596 spin_lock_irqsave(&host->lock, flags); 2597 2598 /* Check sdhci_has_requests() first in case we are runtime suspended */ 2599 if (sdhci_has_requests(host) && !present) { 2600 pr_err("%s: Card removed during transfer!\n", 2601 mmc_hostname(host->mmc)); 2602 pr_err("%s: Resetting controller.\n", 2603 mmc_hostname(host->mmc)); 2604 2605 sdhci_do_reset(host, SDHCI_RESET_CMD); 2606 sdhci_do_reset(host, SDHCI_RESET_DATA); 2607 2608 sdhci_error_out_mrqs(host, -ENOMEDIUM); 2609 } 2610 2611 spin_unlock_irqrestore(&host->lock, flags); 2612 } 2613 2614 static const struct mmc_host_ops sdhci_ops = { 2615 .request = sdhci_request, 2616 .post_req = sdhci_post_req, 2617 .pre_req = sdhci_pre_req, 2618 .set_ios = sdhci_set_ios, 2619 .get_cd = sdhci_get_cd, 2620 .get_ro = sdhci_get_ro, 2621 .hw_reset = sdhci_hw_reset, 2622 .enable_sdio_irq = sdhci_enable_sdio_irq, 2623 .ack_sdio_irq = sdhci_ack_sdio_irq, 2624 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 2625 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 2626 .execute_tuning = sdhci_execute_tuning, 2627 .card_event = sdhci_card_event, 2628 .card_busy = sdhci_card_busy, 2629 }; 2630 2631 /*****************************************************************************\ 2632 * * 2633 * Request done * 2634 * * 2635 \*****************************************************************************/ 2636 2637 static bool sdhci_request_done(struct sdhci_host *host) 2638 { 2639 unsigned long flags; 2640 struct mmc_request *mrq; 2641 int i; 2642 2643 
spin_lock_irqsave(&host->lock, flags); 2644 2645 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 2646 mrq = host->mrqs_done[i]; 2647 if (mrq) 2648 break; 2649 } 2650 2651 if (!mrq) { 2652 spin_unlock_irqrestore(&host->lock, flags); 2653 return true; 2654 } 2655 2656 /* 2657 * Always unmap the data buffers if they were mapped by 2658 * sdhci_prepare_data() whenever we finish with a request. 2659 * This avoids leaking DMA mappings on error. 2660 */ 2661 if (host->flags & SDHCI_REQ_USE_DMA) { 2662 struct mmc_data *data = mrq->data; 2663 2664 if (data && data->host_cookie == COOKIE_MAPPED) { 2665 if (host->bounce_buffer) { 2666 /* 2667 * On reads, copy the bounced data into the 2668 * sglist 2669 */ 2670 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 2671 unsigned int length = data->bytes_xfered; 2672 2673 if (length > host->bounce_buffer_size) { 2674 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 2675 mmc_hostname(host->mmc), 2676 host->bounce_buffer_size, 2677 data->bytes_xfered); 2678 /* Cap it down and continue */ 2679 length = host->bounce_buffer_size; 2680 } 2681 dma_sync_single_for_cpu( 2682 host->mmc->parent, 2683 host->bounce_addr, 2684 host->bounce_buffer_size, 2685 DMA_FROM_DEVICE); 2686 sg_copy_from_buffer(data->sg, 2687 data->sg_len, 2688 host->bounce_buffer, 2689 length); 2690 } else { 2691 /* No copying, just switch ownership */ 2692 dma_sync_single_for_cpu( 2693 host->mmc->parent, 2694 host->bounce_addr, 2695 host->bounce_buffer_size, 2696 mmc_get_dma_dir(data)); 2697 } 2698 } else { 2699 /* Unmap the raw data */ 2700 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 2701 data->sg_len, 2702 mmc_get_dma_dir(data)); 2703 } 2704 data->host_cookie = COOKIE_UNMAPPED; 2705 } 2706 } 2707 2708 /* 2709 * The controller needs a reset of internal state machines 2710 * upon error conditions. 2711 */ 2712 if (sdhci_needs_reset(host, mrq)) { 2713 /* 2714 * Do not finish until command and data lines are available for 2715 * reset. Note there can only be one other mrq, so it cannot 2716 * also be in mrqs_done, otherwise host->cmd and host->data_cmd 2717 * would both be null. 2718 */ 2719 if (host->cmd || host->data_cmd) { 2720 spin_unlock_irqrestore(&host->lock, flags); 2721 return true; 2722 } 2723 2724 /* Some controllers need this kick or reset won't work here */ 2725 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 2726 /* This is to force an update */ 2727 host->ops->set_clock(host, host->clock); 2728 2729 /* Spec says we should do both at the same time, but Ricoh 2730 controllers do not like that. 
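Reset the command and data circuits one after the other instead.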
*/ 2731 sdhci_do_reset(host, SDHCI_RESET_CMD); 2732 sdhci_do_reset(host, SDHCI_RESET_DATA); 2733 2734 host->pending_reset = false; 2735 } 2736 2737 host->mrqs_done[i] = NULL; 2738 2739 spin_unlock_irqrestore(&host->lock, flags); 2740 2741 mmc_request_done(host->mmc, mrq); 2742 2743 return false; 2744 } 2745 2746 static void sdhci_complete_work(struct work_struct *work) 2747 { 2748 struct sdhci_host *host = container_of(work, struct sdhci_host, 2749 complete_work); 2750 2751 while (!sdhci_request_done(host)) 2752 ; 2753 } 2754 2755 static void sdhci_timeout_timer(struct timer_list *t) 2756 { 2757 struct sdhci_host *host; 2758 unsigned long flags; 2759 2760 host = from_timer(host, t, timer); 2761 2762 spin_lock_irqsave(&host->lock, flags); 2763 2764 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { 2765 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", 2766 mmc_hostname(host->mmc)); 2767 sdhci_dumpregs(host); 2768 2769 host->cmd->error = -ETIMEDOUT; 2770 sdhci_finish_mrq(host, host->cmd->mrq); 2771 } 2772 2773 spin_unlock_irqrestore(&host->lock, flags); 2774 } 2775 2776 static void sdhci_timeout_data_timer(struct timer_list *t) 2777 { 2778 struct sdhci_host *host; 2779 unsigned long flags; 2780 2781 host = from_timer(host, t, data_timer); 2782 2783 spin_lock_irqsave(&host->lock, flags); 2784 2785 if (host->data || host->data_cmd || 2786 (host->cmd && sdhci_data_line_cmd(host->cmd))) { 2787 pr_err("%s: Timeout waiting for hardware interrupt.\n", 2788 mmc_hostname(host->mmc)); 2789 sdhci_dumpregs(host); 2790 2791 if (host->data) { 2792 host->data->error = -ETIMEDOUT; 2793 sdhci_finish_data(host); 2794 queue_work(host->complete_wq, &host->complete_work); 2795 } else if (host->data_cmd) { 2796 host->data_cmd->error = -ETIMEDOUT; 2797 sdhci_finish_mrq(host, host->data_cmd->mrq); 2798 } else { 2799 host->cmd->error = -ETIMEDOUT; 2800 sdhci_finish_mrq(host, host->cmd->mrq); 2801 } 2802 } 2803 2804 spin_unlock_irqrestore(&host->lock, flags); 2805 } 2806 2807 /*****************************************************************************\ 2808 * * 2809 * Interrupt handling * 2810 * * 2811 \*****************************************************************************/ 2812 2813 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) 2814 { 2815 /* Handle auto-CMD12 error */ 2816 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { 2817 struct mmc_request *mrq = host->data_cmd->mrq; 2818 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 2819 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 2820 SDHCI_INT_DATA_TIMEOUT : 2821 SDHCI_INT_DATA_CRC; 2822 2823 /* Treat auto-CMD12 error the same as data error */ 2824 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { 2825 *intmask_p |= data_err_bit; 2826 return; 2827 } 2828 } 2829 2830 if (!host->cmd) { 2831 /* 2832 * SDHCI recovers from errors by resetting the cmd and data 2833 * circuits. Until that is done, there very well might be more 2834 * interrupts, so ignore them in that case. 
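 * host->pending_reset flags that such a recovery is still outstanding.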
2835 */ 2836 if (host->pending_reset) 2837 return; 2838 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 2839 mmc_hostname(host->mmc), (unsigned)intmask); 2840 sdhci_dumpregs(host); 2841 return; 2842 } 2843 2844 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 2845 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 2846 if (intmask & SDHCI_INT_TIMEOUT) 2847 host->cmd->error = -ETIMEDOUT; 2848 else 2849 host->cmd->error = -EILSEQ; 2850 2851 /* Treat data command CRC error the same as data CRC error */ 2852 if (host->cmd->data && 2853 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 2854 SDHCI_INT_CRC) { 2855 host->cmd = NULL; 2856 *intmask_p |= SDHCI_INT_DATA_CRC; 2857 return; 2858 } 2859 2860 __sdhci_finish_mrq(host, host->cmd->mrq); 2861 return; 2862 } 2863 2864 /* Handle auto-CMD23 error */ 2865 if (intmask & SDHCI_INT_AUTO_CMD_ERR) { 2866 struct mmc_request *mrq = host->cmd->mrq; 2867 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 2868 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 2869 -ETIMEDOUT : 2870 -EILSEQ; 2871 2872 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { 2873 mrq->sbc->error = err; 2874 __sdhci_finish_mrq(host, mrq); 2875 return; 2876 } 2877 } 2878 2879 if (intmask & SDHCI_INT_RESPONSE) 2880 sdhci_finish_command(host); 2881 } 2882 2883 static void sdhci_adma_show_error(struct sdhci_host *host) 2884 { 2885 void *desc = host->adma_table; 2886 dma_addr_t dma = host->adma_addr; 2887 2888 sdhci_dumpregs(host); 2889 2890 while (true) { 2891 struct sdhci_adma2_64_desc *dma_desc = desc; 2892 2893 if (host->flags & SDHCI_USE_64_BIT_DMA) 2894 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 2895 (unsigned long long)dma, 2896 le32_to_cpu(dma_desc->addr_hi), 2897 le32_to_cpu(dma_desc->addr_lo), 2898 le16_to_cpu(dma_desc->len), 2899 le16_to_cpu(dma_desc->cmd)); 2900 else 2901 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 2902 (unsigned long long)dma, 2903 le32_to_cpu(dma_desc->addr_lo), 2904 le16_to_cpu(dma_desc->len), 2905 le16_to_cpu(dma_desc->cmd)); 2906 2907 desc += host->desc_sz; 2908 dma += host->desc_sz; 2909 2910 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 2911 break; 2912 } 2913 } 2914 2915 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 2916 { 2917 u32 command; 2918 2919 /* CMD19 generates _only_ Buffer Read Ready interrupt */ 2920 if (intmask & SDHCI_INT_DATA_AVAIL) { 2921 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); 2922 if (command == MMC_SEND_TUNING_BLOCK || 2923 command == MMC_SEND_TUNING_BLOCK_HS200) { 2924 host->tuning_done = 1; 2925 wake_up(&host->buf_ready_int); 2926 return; 2927 } 2928 } 2929 2930 if (!host->data) { 2931 struct mmc_command *data_cmd = host->data_cmd; 2932 2933 /* 2934 * The "data complete" interrupt is also used to 2935 * indicate that a busy state has ended. See comment 2936 * above in sdhci_cmd_irq(). 2937 */ 2938 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 2939 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 2940 host->data_cmd = NULL; 2941 data_cmd->error = -ETIMEDOUT; 2942 __sdhci_finish_mrq(host, data_cmd->mrq); 2943 return; 2944 } 2945 if (intmask & SDHCI_INT_DATA_END) { 2946 host->data_cmd = NULL; 2947 /* 2948 * Some cards handle busy-end interrupt 2949 * before the command completed, so make 2950 * sure we do things in the proper order. 
2951 */ 2952 if (host->cmd == data_cmd) 2953 return; 2954 2955 __sdhci_finish_mrq(host, data_cmd->mrq); 2956 return; 2957 } 2958 } 2959 2960 /* 2961 * SDHCI recovers from errors by resetting the cmd and data 2962 * circuits. Until that is done, there very well might be more 2963 * interrupts, so ignore them in that case. 2964 */ 2965 if (host->pending_reset) 2966 return; 2967 2968 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 2969 mmc_hostname(host->mmc), (unsigned)intmask); 2970 sdhci_dumpregs(host); 2971 2972 return; 2973 } 2974 2975 if (intmask & SDHCI_INT_DATA_TIMEOUT) 2976 host->data->error = -ETIMEDOUT; 2977 else if (intmask & SDHCI_INT_DATA_END_BIT) 2978 host->data->error = -EILSEQ; 2979 else if ((intmask & SDHCI_INT_DATA_CRC) && 2980 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 2981 != MMC_BUS_TEST_R) 2982 host->data->error = -EILSEQ; 2983 else if (intmask & SDHCI_INT_ADMA_ERROR) { 2984 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), 2985 intmask); 2986 sdhci_adma_show_error(host); 2987 host->data->error = -EIO; 2988 if (host->ops->adma_workaround) 2989 host->ops->adma_workaround(host, intmask); 2990 } 2991 2992 if (host->data->error) 2993 sdhci_finish_data(host); 2994 else { 2995 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 2996 sdhci_transfer_pio(host); 2997 2998 /* 2999 * We currently don't do anything fancy with DMA 3000 * boundaries, but as we can't disable the feature 3001 * we need to at least restart the transfer. 3002 * 3003 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 3004 * should return a valid address to continue from, but as 3005 * some controllers are faulty, don't trust them. 3006 */ 3007 if (intmask & SDHCI_INT_DMA_END) { 3008 dma_addr_t dmastart, dmanow; 3009 3010 dmastart = sdhci_sdma_address(host); 3011 dmanow = dmastart + host->data->bytes_xfered; 3012 /* 3013 * Force update to the next DMA block boundary. 3014 */ 3015 dmanow = (dmanow & 3016 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 3017 SDHCI_DEFAULT_BOUNDARY_SIZE; 3018 host->data->bytes_xfered = dmanow - dmastart; 3019 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", 3020 &dmastart, host->data->bytes_xfered, &dmanow); 3021 sdhci_set_sdma_addr(host, dmanow); 3022 } 3023 3024 if (intmask & SDHCI_INT_DATA_END) { 3025 if (host->cmd == host->data_cmd) { 3026 /* 3027 * Data managed to finish before the 3028 * command completed. Make sure we do 3029 * things in the proper order. 
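 * (host->data_early is picked up later, once the command itself
 * completes.)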
3030 */
3031 host->data_early = 1;
3032 } else {
3033 sdhci_finish_data(host);
3034 }
3035 }
3036 }
3037 }
3038
3039 static inline bool sdhci_defer_done(struct sdhci_host *host,
3040 struct mmc_request *mrq)
3041 {
3042 struct mmc_data *data = mrq->data;
3043
3044 return host->pending_reset ||
3045 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3046 data->host_cookie == COOKIE_MAPPED);
3047 }
3048
3049 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3050 {
3051 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3052 irqreturn_t result = IRQ_NONE;
3053 struct sdhci_host *host = dev_id;
3054 u32 intmask, mask, unexpected = 0;
3055 int max_loops = 16;
3056 int i;
3057
3058 spin_lock(&host->lock);
3059
3060 if (host->runtime_suspended) {
3061 spin_unlock(&host->lock);
3062 return IRQ_NONE;
3063 }
3064
3065 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3066 if (!intmask || intmask == 0xffffffff) {
3067 result = IRQ_NONE;
3068 goto out;
3069 }
3070
3071 do {
3072 DBG("IRQ status 0x%08x\n", intmask);
3073
3074 if (host->ops->irq) {
3075 intmask = host->ops->irq(host, intmask);
3076 if (!intmask)
3077 goto cont;
3078 }
3079
3080 /* Clear selected interrupts. */
3081 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3082 SDHCI_INT_BUS_POWER);
3083 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3084
3085 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3086 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3087 SDHCI_CARD_PRESENT;
3088
3089 /*
3090 * There is an observation on i.MX eSDHC: the INSERT
3091 * bit will be immediately set again when it gets
3092 * cleared, if a card is inserted. We have to mask
3093 * the IRQ to prevent an interrupt storm which would
3094 * freeze the system. The REMOVE bit gets into the
3095 * same situation.
3096 *
3097 * More testing is needed here to ensure it works
3098 * for other platforms though.
3099 */
3100 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3101 SDHCI_INT_CARD_REMOVE);
3102 host->ier |= present ?
SDHCI_INT_CARD_REMOVE : 3103 SDHCI_INT_CARD_INSERT; 3104 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3105 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3106 3107 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 3108 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 3109 3110 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 3111 SDHCI_INT_CARD_REMOVE); 3112 result = IRQ_WAKE_THREAD; 3113 } 3114 3115 if (intmask & SDHCI_INT_CMD_MASK) 3116 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask); 3117 3118 if (intmask & SDHCI_INT_DATA_MASK) 3119 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 3120 3121 if (intmask & SDHCI_INT_BUS_POWER) 3122 pr_err("%s: Card is consuming too much power!\n", 3123 mmc_hostname(host->mmc)); 3124 3125 if (intmask & SDHCI_INT_RETUNE) 3126 mmc_retune_needed(host->mmc); 3127 3128 if ((intmask & SDHCI_INT_CARD_INT) && 3129 (host->ier & SDHCI_INT_CARD_INT)) { 3130 sdhci_enable_sdio_irq_nolock(host, false); 3131 sdio_signal_irq(host->mmc); 3132 } 3133 3134 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 3135 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3136 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 3137 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); 3138 3139 if (intmask) { 3140 unexpected |= intmask; 3141 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3142 } 3143 cont: 3144 if (result == IRQ_NONE) 3145 result = IRQ_HANDLED; 3146 3147 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3148 } while (intmask && --max_loops); 3149 3150 /* Determine if mrqs can be completed immediately */ 3151 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3152 struct mmc_request *mrq = host->mrqs_done[i]; 3153 3154 if (!mrq) 3155 continue; 3156 3157 if (sdhci_defer_done(host, mrq)) { 3158 result = IRQ_WAKE_THREAD; 3159 } else { 3160 mrqs_done[i] = mrq; 3161 host->mrqs_done[i] = NULL; 3162 } 3163 } 3164 out: 3165 spin_unlock(&host->lock); 3166 3167 /* Process mrqs ready for immediate completion */ 3168 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3169 if (mrqs_done[i]) 3170 mmc_request_done(host->mmc, mrqs_done[i]); 3171 } 3172 3173 if (unexpected) { 3174 pr_err("%s: Unexpected interrupt 0x%08x.\n", 3175 mmc_hostname(host->mmc), unexpected); 3176 sdhci_dumpregs(host); 3177 } 3178 3179 return result; 3180 } 3181 3182 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3183 { 3184 struct sdhci_host *host = dev_id; 3185 unsigned long flags; 3186 u32 isr; 3187 3188 while (!sdhci_request_done(host)) 3189 ; 3190 3191 spin_lock_irqsave(&host->lock, flags); 3192 isr = host->thread_isr; 3193 host->thread_isr = 0; 3194 spin_unlock_irqrestore(&host->lock, flags); 3195 3196 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3197 struct mmc_host *mmc = host->mmc; 3198 3199 mmc->ops->card_event(mmc); 3200 mmc_detect_change(mmc, msecs_to_jiffies(200)); 3201 } 3202 3203 return IRQ_HANDLED; 3204 } 3205 3206 /*****************************************************************************\ 3207 * * 3208 * Suspend/resume * 3209 * * 3210 \*****************************************************************************/ 3211 3212 #ifdef CONFIG_PM 3213 3214 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host) 3215 { 3216 return mmc_card_is_removable(host->mmc) && 3217 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3218 !mmc_can_gpio_cd(host->mmc); 3219 } 3220 3221 /* 3222 * To enable wakeup events, the corresponding events have to be enabled in 3223 * the Interrupt Status Enable register too. 
See 'Table 1-6: Wakeup Signal 3224 * Table' in the SD Host Controller Standard Specification. 3225 * It is useless to restore SDHCI_INT_ENABLE state in 3226 * sdhci_disable_irq_wakeups() since it will be set by 3227 * sdhci_enable_card_detection() or sdhci_init(). 3228 */ 3229 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) 3230 { 3231 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | 3232 SDHCI_WAKE_ON_INT; 3233 u32 irq_val = 0; 3234 u8 wake_val = 0; 3235 u8 val; 3236 3237 if (sdhci_cd_irq_can_wakeup(host)) { 3238 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; 3239 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; 3240 } 3241 3242 if (mmc_card_wake_sdio_irq(host->mmc)) { 3243 wake_val |= SDHCI_WAKE_ON_INT; 3244 irq_val |= SDHCI_INT_CARD_INT; 3245 } 3246 3247 if (!irq_val) 3248 return false; 3249 3250 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3251 val &= ~mask; 3252 val |= wake_val; 3253 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3254 3255 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); 3256 3257 host->irq_wake_enabled = !enable_irq_wake(host->irq); 3258 3259 return host->irq_wake_enabled; 3260 } 3261 3262 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 3263 { 3264 u8 val; 3265 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 3266 | SDHCI_WAKE_ON_INT; 3267 3268 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3269 val &= ~mask; 3270 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3271 3272 disable_irq_wake(host->irq); 3273 3274 host->irq_wake_enabled = false; 3275 } 3276 3277 int sdhci_suspend_host(struct sdhci_host *host) 3278 { 3279 sdhci_disable_card_detection(host); 3280 3281 mmc_retune_timer_stop(host->mmc); 3282 3283 if (!device_may_wakeup(mmc_dev(host->mmc)) || 3284 !sdhci_enable_irq_wakeups(host)) { 3285 host->ier = 0; 3286 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3287 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3288 free_irq(host->irq, host); 3289 } 3290 3291 return 0; 3292 } 3293 3294 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 3295 3296 int sdhci_resume_host(struct sdhci_host *host) 3297 { 3298 struct mmc_host *mmc = host->mmc; 3299 int ret = 0; 3300 3301 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3302 if (host->ops->enable_dma) 3303 host->ops->enable_dma(host); 3304 } 3305 3306 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && 3307 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 3308 /* Card keeps power but host controller does not */ 3309 sdhci_init(host, 0); 3310 host->pwr = 0; 3311 host->clock = 0; 3312 mmc->ops->set_ios(mmc, &mmc->ios); 3313 } else { 3314 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); 3315 } 3316 3317 if (host->irq_wake_enabled) { 3318 sdhci_disable_irq_wakeups(host); 3319 } else { 3320 ret = request_threaded_irq(host->irq, sdhci_irq, 3321 sdhci_thread_irq, IRQF_SHARED, 3322 mmc_hostname(host->mmc), host); 3323 if (ret) 3324 return ret; 3325 } 3326 3327 sdhci_enable_card_detection(host); 3328 3329 return ret; 3330 } 3331 3332 EXPORT_SYMBOL_GPL(sdhci_resume_host); 3333 3334 int sdhci_runtime_suspend_host(struct sdhci_host *host) 3335 { 3336 unsigned long flags; 3337 3338 mmc_retune_timer_stop(host->mmc); 3339 3340 spin_lock_irqsave(&host->lock, flags); 3341 host->ier &= SDHCI_INT_CARD_INT; 3342 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3343 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3344 spin_unlock_irqrestore(&host->lock, flags); 3345 3346 synchronize_hardirq(host->irq); 3347 3348 spin_lock_irqsave(&host->lock, flags); 3349 host->runtime_suspended = 
true;
3350 spin_unlock_irqrestore(&host->lock, flags);
3351
3352 return 0;
3353 }
3354 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3355
3356 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3357 {
3358 struct mmc_host *mmc = host->mmc;
3359 unsigned long flags;
3360 int host_flags = host->flags;
3361
3362 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3363 if (host->ops->enable_dma)
3364 host->ops->enable_dma(host);
3365 }
3366
3367 sdhci_init(host, soft_reset);
3368
3369 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3370 mmc->ios.power_mode != MMC_POWER_OFF) {
3371 /* Force clock and power re-program */
3372 host->pwr = 0;
3373 host->clock = 0;
3374 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3375 mmc->ops->set_ios(mmc, &mmc->ios);
3376
3377 if ((host_flags & SDHCI_PV_ENABLED) &&
3378 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3379 spin_lock_irqsave(&host->lock, flags);
3380 sdhci_enable_preset_value(host, true);
3381 spin_unlock_irqrestore(&host->lock, flags);
3382 }
3383
3384 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3385 mmc->ops->hs400_enhanced_strobe)
3386 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3387 }
3388
3389 spin_lock_irqsave(&host->lock, flags);
3390
3391 host->runtime_suspended = false;
3392
3393 /* Enable SDIO IRQ */
3394 if (sdio_irq_claimed(mmc))
3395 sdhci_enable_sdio_irq_nolock(host, true);
3396
3397 /* Enable Card Detection */
3398 sdhci_enable_card_detection(host);
3399
3400 spin_unlock_irqrestore(&host->lock, flags);
3401
3402 return 0;
3403 }
3404 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3405
3406 #endif /* CONFIG_PM */
3407
3408 /*****************************************************************************\
3409 *                                                                             *
3410 *                      Command Queue Engine (CQE) helpers                     *
3411 *                                                                             *
3412 \*****************************************************************************/
3413
3414 void sdhci_cqe_enable(struct mmc_host *mmc)
3415 {
3416 struct sdhci_host *host = mmc_priv(mmc);
3417 unsigned long flags;
3418 u8 ctrl;
3419
3420 spin_lock_irqsave(&host->lock, flags);
3421
3422 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3423 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3424 /*
3425 * Hosts from v4.10 onwards support the ADMA3 DMA type.
3426 * ADMA3 uses integrated descriptors, which suits command queuing
3427 * better since command and transfer descriptors can be fetched together.
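 * Fall back to ADMA2 in its 64-bit or 32-bit form when ADMA3 is not
 * available.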
3428 */ 3429 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) 3430 ctrl |= SDHCI_CTRL_ADMA3; 3431 else if (host->flags & SDHCI_USE_64_BIT_DMA) 3432 ctrl |= SDHCI_CTRL_ADMA64; 3433 else 3434 ctrl |= SDHCI_CTRL_ADMA32; 3435 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 3436 3437 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), 3438 SDHCI_BLOCK_SIZE); 3439 3440 /* Set maximum timeout */ 3441 sdhci_set_timeout(host, NULL); 3442 3443 host->ier = host->cqe_ier; 3444 3445 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3446 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3447 3448 host->cqe_on = true; 3449 3450 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", 3451 mmc_hostname(mmc), host->ier, 3452 sdhci_readl(host, SDHCI_INT_STATUS)); 3453 3454 spin_unlock_irqrestore(&host->lock, flags); 3455 } 3456 EXPORT_SYMBOL_GPL(sdhci_cqe_enable); 3457 3458 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) 3459 { 3460 struct sdhci_host *host = mmc_priv(mmc); 3461 unsigned long flags; 3462 3463 spin_lock_irqsave(&host->lock, flags); 3464 3465 sdhci_set_default_irqs(host); 3466 3467 host->cqe_on = false; 3468 3469 if (recovery) { 3470 sdhci_do_reset(host, SDHCI_RESET_CMD); 3471 sdhci_do_reset(host, SDHCI_RESET_DATA); 3472 } 3473 3474 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", 3475 mmc_hostname(mmc), host->ier, 3476 sdhci_readl(host, SDHCI_INT_STATUS)); 3477 3478 spin_unlock_irqrestore(&host->lock, flags); 3479 } 3480 EXPORT_SYMBOL_GPL(sdhci_cqe_disable); 3481 3482 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, 3483 int *data_error) 3484 { 3485 u32 mask; 3486 3487 if (!host->cqe_on) 3488 return false; 3489 3490 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) 3491 *cmd_error = -EILSEQ; 3492 else if (intmask & SDHCI_INT_TIMEOUT) 3493 *cmd_error = -ETIMEDOUT; 3494 else 3495 *cmd_error = 0; 3496 3497 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) 3498 *data_error = -EILSEQ; 3499 else if (intmask & SDHCI_INT_DATA_TIMEOUT) 3500 *data_error = -ETIMEDOUT; 3501 else if (intmask & SDHCI_INT_ADMA_ERROR) 3502 *data_error = -EIO; 3503 else 3504 *data_error = 0; 3505 3506 /* Clear selected interrupts. 
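 * Only bits covered by the CQE interrupt mask are acknowledged here;
 * anything left over is reported as unexpected further down.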
*/ 3507 mask = intmask & host->cqe_ier; 3508 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3509 3510 if (intmask & SDHCI_INT_BUS_POWER) 3511 pr_err("%s: Card is consuming too much power!\n", 3512 mmc_hostname(host->mmc)); 3513 3514 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR); 3515 if (intmask) { 3516 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3517 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n", 3518 mmc_hostname(host->mmc), intmask); 3519 sdhci_dumpregs(host); 3520 } 3521 3522 return true; 3523 } 3524 EXPORT_SYMBOL_GPL(sdhci_cqe_irq); 3525 3526 /*****************************************************************************\ 3527 * * 3528 * Device allocation/registration * 3529 * * 3530 \*****************************************************************************/ 3531 3532 struct sdhci_host *sdhci_alloc_host(struct device *dev, 3533 size_t priv_size) 3534 { 3535 struct mmc_host *mmc; 3536 struct sdhci_host *host; 3537 3538 WARN_ON(dev == NULL); 3539 3540 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 3541 if (!mmc) 3542 return ERR_PTR(-ENOMEM); 3543 3544 host = mmc_priv(mmc); 3545 host->mmc = mmc; 3546 host->mmc_host_ops = sdhci_ops; 3547 mmc->ops = &host->mmc_host_ops; 3548 3549 host->flags = SDHCI_SIGNALING_330; 3550 3551 host->cqe_ier = SDHCI_CQE_INT_MASK; 3552 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK; 3553 3554 host->tuning_delay = -1; 3555 host->tuning_loop_count = MAX_TUNING_LOOP; 3556 3557 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; 3558 3559 /* 3560 * The DMA table descriptor count is calculated as the maximum 3561 * number of segments times 2, to allow for an alignment 3562 * descriptor for each segment, plus 1 for a nop end descriptor. 3563 */ 3564 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; 3565 3566 return host; 3567 } 3568 3569 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 3570 3571 static int sdhci_set_dma_mask(struct sdhci_host *host) 3572 { 3573 struct mmc_host *mmc = host->mmc; 3574 struct device *dev = mmc_dev(mmc); 3575 int ret = -EINVAL; 3576 3577 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 3578 host->flags &= ~SDHCI_USE_64_BIT_DMA; 3579 3580 /* Try 64-bit mask if hardware is capable of it */ 3581 if (host->flags & SDHCI_USE_64_BIT_DMA) { 3582 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 3583 if (ret) { 3584 pr_warn("%s: Failed to set 64-bit DMA mask.\n", 3585 mmc_hostname(mmc)); 3586 host->flags &= ~SDHCI_USE_64_BIT_DMA; 3587 } 3588 } 3589 3590 /* 32-bit mask as default & fallback */ 3591 if (ret) { 3592 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 3593 if (ret) 3594 pr_warn("%s: Failed to set 32-bit DMA mask.\n", 3595 mmc_hostname(mmc)); 3596 } 3597 3598 return ret; 3599 } 3600 3601 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver, 3602 const u32 *caps, const u32 *caps1) 3603 { 3604 u16 v; 3605 u64 dt_caps_mask = 0; 3606 u64 dt_caps = 0; 3607 3608 if (host->read_caps) 3609 return; 3610 3611 host->read_caps = true; 3612 3613 if (debug_quirks) 3614 host->quirks = debug_quirks; 3615 3616 if (debug_quirks2) 3617 host->quirks2 = debug_quirks2; 3618 3619 sdhci_do_reset(host, SDHCI_RESET_ALL); 3620 3621 if (host->v4_mode) 3622 sdhci_do_enable_v4_mode(host); 3623 3624 of_property_read_u64(mmc_dev(host->mmc)->of_node, 3625 "sdhci-caps-mask", &dt_caps_mask); 3626 of_property_read_u64(mmc_dev(host->mmc)->of_node, 3627 "sdhci-caps", &dt_caps); 3628 3629 v = ver ? 
*ver : sdhci_readw(host, SDHCI_HOST_VERSION); 3630 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; 3631 3632 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS) 3633 return; 3634 3635 if (caps) { 3636 host->caps = *caps; 3637 } else { 3638 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); 3639 host->caps &= ~lower_32_bits(dt_caps_mask); 3640 host->caps |= lower_32_bits(dt_caps); 3641 } 3642 3643 if (host->version < SDHCI_SPEC_300) 3644 return; 3645 3646 if (caps1) { 3647 host->caps1 = *caps1; 3648 } else { 3649 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); 3650 host->caps1 &= ~upper_32_bits(dt_caps_mask); 3651 host->caps1 |= upper_32_bits(dt_caps); 3652 } 3653 } 3654 EXPORT_SYMBOL_GPL(__sdhci_read_caps); 3655 3656 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host) 3657 { 3658 struct mmc_host *mmc = host->mmc; 3659 unsigned int max_blocks; 3660 unsigned int bounce_size; 3661 int ret; 3662 3663 /* 3664 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer 3665 * has diminishing returns, this is probably because SD/MMC 3666 * cards are usually optimized to handle this size of requests. 3667 */ 3668 bounce_size = SZ_64K; 3669 /* 3670 * Adjust downwards to maximum request size if this is less 3671 * than our segment size, else hammer down the maximum 3672 * request size to the maximum buffer size. 3673 */ 3674 if (mmc->max_req_size < bounce_size) 3675 bounce_size = mmc->max_req_size; 3676 max_blocks = bounce_size / 512; 3677 3678 /* 3679 * When we just support one segment, we can get significant 3680 * speedups by the help of a bounce buffer to group scattered 3681 * reads/writes together. 3682 */ 3683 host->bounce_buffer = devm_kmalloc(mmc->parent, 3684 bounce_size, 3685 GFP_KERNEL); 3686 if (!host->bounce_buffer) { 3687 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", 3688 mmc_hostname(mmc), 3689 bounce_size); 3690 /* 3691 * Exiting with zero here makes sure we proceed with 3692 * mmc->max_segs == 1. 3693 */ 3694 return; 3695 } 3696 3697 host->bounce_addr = dma_map_single(mmc->parent, 3698 host->bounce_buffer, 3699 bounce_size, 3700 DMA_BIDIRECTIONAL); 3701 ret = dma_mapping_error(mmc->parent, host->bounce_addr); 3702 if (ret) 3703 /* Again fall back to max_segs == 1 */ 3704 return; 3705 host->bounce_buffer_size = bounce_size; 3706 3707 /* Lie about this since we're bouncing */ 3708 mmc->max_segs = max_blocks; 3709 mmc->max_seg_size = bounce_size; 3710 mmc->max_req_size = bounce_size; 3711 3712 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n", 3713 mmc_hostname(mmc), max_blocks, bounce_size); 3714 } 3715 3716 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host) 3717 { 3718 /* 3719 * According to SD Host Controller spec v4.10, bit[27] added from 3720 * version 4.10 in Capabilities Register is used as 64-bit System 3721 * Address support for V4 mode. 3722 */ 3723 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) 3724 return host->caps & SDHCI_CAN_64BIT_V4; 3725 3726 return host->caps & SDHCI_CAN_64BIT; 3727 } 3728 3729 int sdhci_setup_host(struct sdhci_host *host) 3730 { 3731 struct mmc_host *mmc; 3732 u32 max_current_caps; 3733 unsigned int ocr_avail; 3734 unsigned int override_timeout_clk; 3735 u32 max_clk; 3736 int ret; 3737 3738 WARN_ON(host == NULL); 3739 if (host == NULL) 3740 return -EINVAL; 3741 3742 mmc = host->mmc; 3743 3744 /* 3745 * If there are external regulators, get them. 
Note this must be done 3746 * early before resetting the host and reading the capabilities so that 3747 * the host can take the appropriate action if regulators are not 3748 * available. 3749 */ 3750 ret = mmc_regulator_get_supply(mmc); 3751 if (ret) 3752 return ret; 3753 3754 DBG("Version: 0x%08x | Present: 0x%08x\n", 3755 sdhci_readw(host, SDHCI_HOST_VERSION), 3756 sdhci_readl(host, SDHCI_PRESENT_STATE)); 3757 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", 3758 sdhci_readl(host, SDHCI_CAPABILITIES), 3759 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 3760 3761 sdhci_read_caps(host); 3762 3763 override_timeout_clk = host->timeout_clk; 3764 3765 if (host->version > SDHCI_SPEC_420) { 3766 pr_err("%s: Unknown controller version (%d). You may experience problems.\n", 3767 mmc_hostname(mmc), host->version); 3768 } 3769 3770 if (host->quirks & SDHCI_QUIRK_BROKEN_CQE) 3771 mmc->caps2 &= ~MMC_CAP2_CQE; 3772 3773 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 3774 host->flags |= SDHCI_USE_SDMA; 3775 else if (!(host->caps & SDHCI_CAN_DO_SDMA)) 3776 DBG("Controller doesn't have SDMA capability\n"); 3777 else 3778 host->flags |= SDHCI_USE_SDMA; 3779 3780 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 3781 (host->flags & SDHCI_USE_SDMA)) { 3782 DBG("Disabling DMA as it is marked broken\n"); 3783 host->flags &= ~SDHCI_USE_SDMA; 3784 } 3785 3786 if ((host->version >= SDHCI_SPEC_200) && 3787 (host->caps & SDHCI_CAN_DO_ADMA2)) 3788 host->flags |= SDHCI_USE_ADMA; 3789 3790 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 3791 (host->flags & SDHCI_USE_ADMA)) { 3792 DBG("Disabling ADMA as it is marked broken\n"); 3793 host->flags &= ~SDHCI_USE_ADMA; 3794 } 3795 3796 if (sdhci_can_64bit_dma(host)) 3797 host->flags |= SDHCI_USE_64_BIT_DMA; 3798 3799 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3800 if (host->ops->set_dma_mask) 3801 ret = host->ops->set_dma_mask(host); 3802 else 3803 ret = sdhci_set_dma_mask(host); 3804 3805 if (!ret && host->ops->enable_dma) 3806 ret = host->ops->enable_dma(host); 3807 3808 if (ret) { 3809 pr_warn("%s: No suitable DMA available - falling back to PIO\n", 3810 mmc_hostname(mmc)); 3811 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 3812 3813 ret = 0; 3814 } 3815 } 3816 3817 /* SDMA does not support 64-bit DMA if v4 mode not set */ 3818 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode) 3819 host->flags &= ~SDHCI_USE_SDMA; 3820 3821 if (host->flags & SDHCI_USE_ADMA) { 3822 dma_addr_t dma; 3823 void *buf; 3824 3825 if (host->flags & SDHCI_USE_64_BIT_DMA) { 3826 host->adma_table_sz = host->adma_table_cnt * 3827 SDHCI_ADMA2_64_DESC_SZ(host); 3828 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); 3829 } else { 3830 host->adma_table_sz = host->adma_table_cnt * 3831 SDHCI_ADMA2_32_DESC_SZ; 3832 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; 3833 } 3834 3835 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; 3836 /* 3837 * Use zalloc to zero the reserved high 32-bits of 128-bit 3838 * descriptors so that they never need to be written. 
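 * The single allocation below holds the alignment buffer followed by
 * the ADMA descriptor table, hence the combined size.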
3839 */ 3840 buf = dma_alloc_coherent(mmc_dev(mmc), 3841 host->align_buffer_sz + host->adma_table_sz, 3842 &dma, GFP_KERNEL); 3843 if (!buf) { 3844 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 3845 mmc_hostname(mmc)); 3846 host->flags &= ~SDHCI_USE_ADMA; 3847 } else if ((dma + host->align_buffer_sz) & 3848 (SDHCI_ADMA2_DESC_ALIGN - 1)) { 3849 pr_warn("%s: unable to allocate aligned ADMA descriptor\n", 3850 mmc_hostname(mmc)); 3851 host->flags &= ~SDHCI_USE_ADMA; 3852 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 3853 host->adma_table_sz, buf, dma); 3854 } else { 3855 host->align_buffer = buf; 3856 host->align_addr = dma; 3857 3858 host->adma_table = buf + host->align_buffer_sz; 3859 host->adma_addr = dma + host->align_buffer_sz; 3860 } 3861 } 3862 3863 /* 3864 * If we use DMA, then it's up to the caller to set the DMA 3865 * mask, but PIO does not need the hw shim so we set a new 3866 * mask here in that case. 3867 */ 3868 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { 3869 host->dma_mask = DMA_BIT_MASK(64); 3870 mmc_dev(mmc)->dma_mask = &host->dma_mask; 3871 } 3872 3873 if (host->version >= SDHCI_SPEC_300) 3874 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK) 3875 >> SDHCI_CLOCK_BASE_SHIFT; 3876 else 3877 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK) 3878 >> SDHCI_CLOCK_BASE_SHIFT; 3879 3880 host->max_clk *= 1000000; 3881 if (host->max_clk == 0 || host->quirks & 3882 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 3883 if (!host->ops->get_max_clock) { 3884 pr_err("%s: Hardware doesn't specify base clock frequency.\n", 3885 mmc_hostname(mmc)); 3886 ret = -ENODEV; 3887 goto undma; 3888 } 3889 host->max_clk = host->ops->get_max_clock(host); 3890 } 3891 3892 /* 3893 * In case of Host Controller v3.00, find out whether clock 3894 * multiplier is supported. 3895 */ 3896 host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >> 3897 SDHCI_CLOCK_MUL_SHIFT; 3898 3899 /* 3900 * In case the value in Clock Multiplier is 0, then programmable 3901 * clock mode is not supported, otherwise the actual clock 3902 * multiplier is one more than the value of Clock Multiplier 3903 * in the Capabilities Register. 3904 */ 3905 if (host->clk_mul) 3906 host->clk_mul += 1; 3907 3908 /* 3909 * Set host parameters. 3910 */ 3911 max_clk = host->max_clk; 3912 3913 if (host->ops->get_min_clock) 3914 mmc->f_min = host->ops->get_min_clock(host); 3915 else if (host->version >= SDHCI_SPEC_300) { 3916 if (host->clk_mul) { 3917 mmc->f_min = (host->max_clk * host->clk_mul) / 1024; 3918 max_clk = host->max_clk * host->clk_mul; 3919 } else 3920 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 3921 } else 3922 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 3923 3924 if (!mmc->f_max || mmc->f_max > max_clk) 3925 mmc->f_max = max_clk; 3926 3927 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 3928 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >> 3929 SDHCI_TIMEOUT_CLK_SHIFT; 3930 3931 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) 3932 host->timeout_clk *= 1000; 3933 3934 if (host->timeout_clk == 0) { 3935 if (!host->ops->get_timeout_clock) { 3936 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n", 3937 mmc_hostname(mmc)); 3938 ret = -ENODEV; 3939 goto undma; 3940 } 3941 3942 host->timeout_clk = 3943 DIV_ROUND_UP(host->ops->get_timeout_clock(host), 3944 1000); 3945 } 3946 3947 if (override_timeout_clk) 3948 host->timeout_clk = override_timeout_clk; 3949 3950 mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 
3951 host->ops->get_max_timeout_count(host) : 1 << 27; 3952 mmc->max_busy_timeout /= host->timeout_clk; 3953 } 3954 3955 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT && 3956 !host->ops->get_max_timeout_count) 3957 mmc->max_busy_timeout = 0; 3958 3959 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; 3960 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 3961 3962 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 3963 host->flags |= SDHCI_AUTO_CMD12; 3964 3965 /* 3966 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO. 3967 * For v4 mode, SDMA may use Auto-CMD23 as well. 3968 */ 3969 if ((host->version >= SDHCI_SPEC_300) && 3970 ((host->flags & SDHCI_USE_ADMA) || 3971 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) && 3972 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { 3973 host->flags |= SDHCI_AUTO_CMD23; 3974 DBG("Auto-CMD23 available\n"); 3975 } else { 3976 DBG("Auto-CMD23 unavailable\n"); 3977 } 3978 3979 /* 3980 * A controller may support 8-bit width, but the board itself 3981 * might not have the pins brought out. Boards that support 3982 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 3983 * their platform code before calling sdhci_add_host(), and we 3984 * won't assume 8-bit width for hosts without that CAP. 3985 */ 3986 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 3987 mmc->caps |= MMC_CAP_4_BIT_DATA; 3988 3989 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) 3990 mmc->caps &= ~MMC_CAP_CMD23; 3991 3992 if (host->caps & SDHCI_CAN_DO_HISPD) 3993 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 3994 3995 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3996 mmc_card_is_removable(mmc) && 3997 mmc_gpio_get_cd(host->mmc) < 0) 3998 mmc->caps |= MMC_CAP_NEEDS_POLL; 3999 4000 if (!IS_ERR(mmc->supply.vqmmc)) { 4001 ret = regulator_enable(mmc->supply.vqmmc); 4002 4003 /* If vqmmc provides no 1.8V signalling, then there's no UHS */ 4004 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, 4005 1950000)) 4006 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | 4007 SDHCI_SUPPORT_SDR50 | 4008 SDHCI_SUPPORT_DDR50); 4009 4010 /* In eMMC case vqmmc might be a fixed 1.8V regulator */ 4011 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000, 4012 3600000)) 4013 host->flags &= ~SDHCI_SIGNALING_330; 4014 4015 if (ret) { 4016 pr_warn("%s: Failed to enable vqmmc regulator: %d\n", 4017 mmc_hostname(mmc), ret); 4018 mmc->supply.vqmmc = ERR_PTR(-EINVAL); 4019 } 4020 } 4021 4022 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) { 4023 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 4024 SDHCI_SUPPORT_DDR50); 4025 /* 4026 * The SDHCI controller in a SoC might support HS200/HS400 4027 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property), 4028 * but if the board is modeled such that the IO lines are not 4029 * connected to 1.8v then HS200/HS400 cannot be supported. 4030 * Disable HS200/HS400 if the board does not have 1.8v connected 4031 * to the IO lines. (Applicable for other modes in 1.8v) 4032 */ 4033 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES); 4034 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS); 4035 } 4036 4037 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. 
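 * (SDR12 and SDR25 are mandatory for UHS-I hosts, so the Capabilities
 * registers have no separate bits for them.)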
	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
			    SDHCI_RETUNING_MODE_SHIFT;

	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
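
	/*
	 * Worked example with an illustrative register value: a Maximum
	 * Current field of 50 encodes 50 * SDHCI_MAX_CURRENT_MULTIPLIER =
	 * 200 mA, comfortably above the 150 mA threshold mentioned above.
	 */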
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum request size in one transfer, in bytes. Limited by the SDMA
	 * boundary size (512 KiB). Note some tuning modes impose a 4 MiB
	 * limit, but this is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
		if (swiotlb_max_segment()) {
			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
						IO_TLB_SEGSIZE;
			mmc->max_req_size = min(mmc->max_req_size,
						max_req_size);
		}
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}

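	/*
	 * For reference (assuming the usual swiotlb geometry of 2 KiB slots,
	 * IO_TLB_SHIFT == 11 and IO_TLB_SEGSIZE == 128), the SDMA cap above
	 * works out to 128 * 2 KiB = 256 KiB, i.e. below the 512 KiB default.
	 */
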
	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	if (mmc->max_segs == 1)
		/* This may alter mmc->*_blk_* parameters */
		sdhci_allocate_bounce_buffer(host);

	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);

int __sdhci_add_host(struct sdhci_host *host)
{
	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
	struct mmc_host *mmc = host->mmc;
	int ret;

	host->complete_wq = alloc_workqueue("sdhci", flags, 0);
	if (!host->complete_wq)
		return -ENOMEM;

	INIT_WORK(&host->complete_work, sdhci_complete_work);

	timer_setup(&host->timer, sdhci_timeout_timer, 0);
	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto unwq;
	}

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ?
"DMA" : "PIO"); 4327 4328 sdhci_enable_card_detection(host); 4329 4330 return 0; 4331 4332 unled: 4333 sdhci_led_unregister(host); 4334 unirq: 4335 sdhci_do_reset(host, SDHCI_RESET_ALL); 4336 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4337 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4338 free_irq(host->irq, host); 4339 unwq: 4340 destroy_workqueue(host->complete_wq); 4341 4342 return ret; 4343 } 4344 EXPORT_SYMBOL_GPL(__sdhci_add_host); 4345 4346 int sdhci_add_host(struct sdhci_host *host) 4347 { 4348 int ret; 4349 4350 ret = sdhci_setup_host(host); 4351 if (ret) 4352 return ret; 4353 4354 ret = __sdhci_add_host(host); 4355 if (ret) 4356 goto cleanup; 4357 4358 return 0; 4359 4360 cleanup: 4361 sdhci_cleanup_host(host); 4362 4363 return ret; 4364 } 4365 EXPORT_SYMBOL_GPL(sdhci_add_host); 4366 4367 void sdhci_remove_host(struct sdhci_host *host, int dead) 4368 { 4369 struct mmc_host *mmc = host->mmc; 4370 unsigned long flags; 4371 4372 if (dead) { 4373 spin_lock_irqsave(&host->lock, flags); 4374 4375 host->flags |= SDHCI_DEVICE_DEAD; 4376 4377 if (sdhci_has_requests(host)) { 4378 pr_err("%s: Controller removed during " 4379 " transfer!\n", mmc_hostname(mmc)); 4380 sdhci_error_out_mrqs(host, -ENOMEDIUM); 4381 } 4382 4383 spin_unlock_irqrestore(&host->lock, flags); 4384 } 4385 4386 sdhci_disable_card_detection(host); 4387 4388 mmc_remove_host(mmc); 4389 4390 sdhci_led_unregister(host); 4391 4392 if (!dead) 4393 sdhci_do_reset(host, SDHCI_RESET_ALL); 4394 4395 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4396 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4397 free_irq(host->irq, host); 4398 4399 del_timer_sync(&host->timer); 4400 del_timer_sync(&host->data_timer); 4401 4402 destroy_workqueue(host->complete_wq); 4403 4404 if (!IS_ERR(mmc->supply.vqmmc)) 4405 regulator_disable(mmc->supply.vqmmc); 4406 4407 if (host->align_buffer) 4408 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4409 host->adma_table_sz, host->align_buffer, 4410 host->align_addr); 4411 4412 host->adma_table = NULL; 4413 host->align_buffer = NULL; 4414 } 4415 4416 EXPORT_SYMBOL_GPL(sdhci_remove_host); 4417 4418 void sdhci_free_host(struct sdhci_host *host) 4419 { 4420 mmc_free_host(host->mmc); 4421 } 4422 4423 EXPORT_SYMBOL_GPL(sdhci_free_host); 4424 4425 /*****************************************************************************\ 4426 * * 4427 * Driver init/exit * 4428 * * 4429 \*****************************************************************************/ 4430 4431 static int __init sdhci_drv_init(void) 4432 { 4433 pr_info(DRIVER_NAME 4434 ": Secure Digital Host Controller Interface driver\n"); 4435 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 4436 4437 return 0; 4438 } 4439 4440 static void __exit sdhci_drv_exit(void) 4441 { 4442 } 4443 4444 module_init(sdhci_drv_init); 4445 module_exit(sdhci_drv_exit); 4446 4447 module_param(debug_quirks, uint, 0444); 4448 module_param(debug_quirks2, uint, 0444); 4449 4450 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 4451 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 4452 MODULE_LICENSE("GPL"); 4453 4454 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 4455 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 4456