1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver 4 * 5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 6 * 7 * Thanks to the following companies for their support: 8 * 9 * - JMicron (hardware and technical support) 10 */ 11 12 #include <linux/bitfield.h> 13 #include <linux/delay.h> 14 #include <linux/dmaengine.h> 15 #include <linux/ktime.h> 16 #include <linux/highmem.h> 17 #include <linux/io.h> 18 #include <linux/module.h> 19 #include <linux/dma-mapping.h> 20 #include <linux/slab.h> 21 #include <linux/scatterlist.h> 22 #include <linux/sizes.h> 23 #include <linux/swiotlb.h> 24 #include <linux/regulator/consumer.h> 25 #include <linux/pm_runtime.h> 26 #include <linux/of.h> 27 28 #include <linux/leds.h> 29 30 #include <linux/mmc/mmc.h> 31 #include <linux/mmc/host.h> 32 #include <linux/mmc/card.h> 33 #include <linux/mmc/sdio.h> 34 #include <linux/mmc/slot-gpio.h> 35 36 #include "sdhci.h" 37 38 #define DRIVER_NAME "sdhci" 39 40 #define DBG(f, x...) \ 41 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 42 43 #define SDHCI_DUMP(f, x...) \ 44 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 45 46 #define MAX_TUNING_LOOP 40 47 48 static unsigned int debug_quirks = 0; 49 static unsigned int debug_quirks2; 50 51 static void sdhci_finish_data(struct sdhci_host *); 52 53 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); 54 55 void sdhci_dumpregs(struct sdhci_host *host) 56 { 57 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n"); 58 59 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n", 60 sdhci_readl(host, SDHCI_DMA_ADDRESS), 61 sdhci_readw(host, SDHCI_HOST_VERSION)); 62 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n", 63 sdhci_readw(host, SDHCI_BLOCK_SIZE), 64 sdhci_readw(host, SDHCI_BLOCK_COUNT)); 65 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n", 66 sdhci_readl(host, SDHCI_ARGUMENT), 67 sdhci_readw(host, SDHCI_TRANSFER_MODE)); 68 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n", 69 sdhci_readl(host, SDHCI_PRESENT_STATE), 70 sdhci_readb(host, SDHCI_HOST_CONTROL)); 71 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n", 72 sdhci_readb(host, SDHCI_POWER_CONTROL), 73 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); 74 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n", 75 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), 76 sdhci_readw(host, SDHCI_CLOCK_CONTROL)); 77 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n", 78 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), 79 sdhci_readl(host, SDHCI_INT_STATUS)); 80 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n", 81 sdhci_readl(host, SDHCI_INT_ENABLE), 82 sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); 83 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n", 84 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS), 85 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 86 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n", 87 sdhci_readl(host, SDHCI_CAPABILITIES), 88 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 89 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n", 90 sdhci_readw(host, SDHCI_COMMAND), 91 sdhci_readl(host, SDHCI_MAX_CURRENT)); 92 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n", 93 sdhci_readl(host, SDHCI_RESPONSE), 94 sdhci_readl(host, SDHCI_RESPONSE + 4)); 95 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n", 96 sdhci_readl(host, SDHCI_RESPONSE + 8), 97 sdhci_readl(host, SDHCI_RESPONSE + 12)); 98 SDHCI_DUMP("Host ctl2: 0x%08x\n", 99 sdhci_readw(host, SDHCI_HOST_CONTROL2)); 100 101 if (host->flags & SDHCI_USE_ADMA) { 102 if 
(host->flags & SDHCI_USE_64_BIT_DMA) { 103 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n", 104 sdhci_readl(host, SDHCI_ADMA_ERROR), 105 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI), 106 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 107 } else { 108 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 109 sdhci_readl(host, SDHCI_ADMA_ERROR), 110 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 111 } 112 } 113 114 SDHCI_DUMP("============================================\n"); 115 } 116 EXPORT_SYMBOL_GPL(sdhci_dumpregs); 117 118 /*****************************************************************************\ 119 * * 120 * Low level functions * 121 * * 122 \*****************************************************************************/ 123 124 static void sdhci_do_enable_v4_mode(struct sdhci_host *host) 125 { 126 u16 ctrl2; 127 128 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 129 if (ctrl2 & SDHCI_CTRL_V4_MODE) 130 return; 131 132 ctrl2 |= SDHCI_CTRL_V4_MODE; 133 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 134 } 135 136 /* 137 * This can be called before sdhci_add_host() by Vendor's host controller 138 * driver to enable v4 mode if supported. 139 */ 140 void sdhci_enable_v4_mode(struct sdhci_host *host) 141 { 142 host->v4_mode = true; 143 sdhci_do_enable_v4_mode(host); 144 } 145 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode); 146 147 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd) 148 { 149 return cmd->data || cmd->flags & MMC_RSP_BUSY; 150 } 151 152 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) 153 { 154 u32 present; 155 156 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 157 !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc)) 158 return; 159 160 if (enable) { 161 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 162 SDHCI_CARD_PRESENT; 163 164 host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : 165 SDHCI_INT_CARD_INSERT; 166 } else { 167 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 168 } 169 170 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 171 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 172 } 173 174 static void sdhci_enable_card_detection(struct sdhci_host *host) 175 { 176 sdhci_set_card_detection(host, true); 177 } 178 179 static void sdhci_disable_card_detection(struct sdhci_host *host) 180 { 181 sdhci_set_card_detection(host, false); 182 } 183 184 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) 185 { 186 if (host->bus_on) 187 return; 188 host->bus_on = true; 189 pm_runtime_get_noresume(host->mmc->parent); 190 } 191 192 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) 193 { 194 if (!host->bus_on) 195 return; 196 host->bus_on = false; 197 pm_runtime_put_noidle(host->mmc->parent); 198 } 199 200 void sdhci_reset(struct sdhci_host *host, u8 mask) 201 { 202 ktime_t timeout; 203 204 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 205 206 if (mask & SDHCI_RESET_ALL) { 207 host->clock = 0; 208 /* Reset-all turns off SD Bus Power */ 209 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 210 sdhci_runtime_pm_bus_off(host); 211 } 212 213 /* Wait max 100 ms */ 214 timeout = ktime_add_ms(ktime_get(), 100); 215 216 /* hw clears the bit when it's done */ 217 while (1) { 218 bool timedout = ktime_after(ktime_get(), timeout); 219 220 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) 221 break; 222 if (timedout) { 223 pr_err("%s: Reset 0x%x never completed.\n", 224 mmc_hostname(host->mmc), (int)mask); 225 sdhci_dumpregs(host); 226 return; 227 } 228 udelay(10); 229 } 230 } 231 EXPORT_SYMBOL_GPL(sdhci_reset); 232 233 static void sdhci_do_reset(struct sdhci_host *host, u8 mask) 234 { 235 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 236 struct mmc_host *mmc = host->mmc; 237 238 if (!mmc->ops->get_cd(mmc)) 239 return; 240 } 241 242 host->ops->reset(host, mask); 243 244 if (mask & SDHCI_RESET_ALL) { 245 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 246 if (host->ops->enable_dma) 247 host->ops->enable_dma(host); 248 } 249 250 /* Resetting the controller clears many */ 251 host->preset_enabled = false; 252 } 253 } 254 255 static void sdhci_set_default_irqs(struct sdhci_host *host) 256 { 257 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 258 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | 259 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | 260 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | 261 SDHCI_INT_RESPONSE; 262 263 if (host->tuning_mode == SDHCI_TUNING_MODE_2 || 264 host->tuning_mode == SDHCI_TUNING_MODE_3) 265 host->ier |= SDHCI_INT_RETUNE; 266 267 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 268 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 269 } 270 271 static void sdhci_config_dma(struct sdhci_host *host) 272 { 273 u8 ctrl; 274 u16 ctrl2; 275 276 if (host->version < SDHCI_SPEC_200) 277 return; 278 279 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 280 281 /* 282 * Always adjust the DMA selection as some controllers 283 * (e.g. JMicron) can't do PIO properly when the selection 284 * is ADMA. 
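	 * The field is therefore cleared here first and only set again
	 * further down, once it is known that this request will really
	 * use DMA.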
285 */ 286 ctrl &= ~SDHCI_CTRL_DMA_MASK; 287 if (!(host->flags & SDHCI_REQ_USE_DMA)) 288 goto out; 289 290 /* Note if DMA Select is zero then SDMA is selected */ 291 if (host->flags & SDHCI_USE_ADMA) 292 ctrl |= SDHCI_CTRL_ADMA32; 293 294 if (host->flags & SDHCI_USE_64_BIT_DMA) { 295 /* 296 * If v4 mode, all supported DMA can be 64-bit addressing if 297 * controller supports 64-bit system address, otherwise only 298 * ADMA can support 64-bit addressing. 299 */ 300 if (host->v4_mode) { 301 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 302 ctrl2 |= SDHCI_CTRL_64BIT_ADDR; 303 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 304 } else if (host->flags & SDHCI_USE_ADMA) { 305 /* 306 * Don't need to undo SDHCI_CTRL_ADMA32 in order to 307 * set SDHCI_CTRL_ADMA64. 308 */ 309 ctrl |= SDHCI_CTRL_ADMA64; 310 } 311 } 312 313 out: 314 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 315 } 316 317 static void sdhci_init(struct sdhci_host *host, int soft) 318 { 319 struct mmc_host *mmc = host->mmc; 320 321 if (soft) 322 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 323 else 324 sdhci_do_reset(host, SDHCI_RESET_ALL); 325 326 if (host->v4_mode) 327 sdhci_do_enable_v4_mode(host); 328 329 sdhci_set_default_irqs(host); 330 331 host->cqe_on = false; 332 333 if (soft) { 334 /* force clock reconfiguration */ 335 host->clock = 0; 336 mmc->ops->set_ios(mmc, &mmc->ios); 337 } 338 } 339 340 static void sdhci_reinit(struct sdhci_host *host) 341 { 342 u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 343 344 sdhci_init(host, 0); 345 sdhci_enable_card_detection(host); 346 347 /* 348 * A change to the card detect bits indicates a change in present state, 349 * refer sdhci_set_card_detection(). A card detect interrupt might have 350 * been missed while the host controller was being reset, so trigger a 351 * rescan to check. 
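	 * The 200 ms delay passed to mmc_detect_change() below simply gives
	 * the present-state signal time to settle before the rescan runs.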
352 */ 353 if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT))) 354 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 355 } 356 357 static void __sdhci_led_activate(struct sdhci_host *host) 358 { 359 u8 ctrl; 360 361 if (host->quirks & SDHCI_QUIRK_NO_LED) 362 return; 363 364 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 365 ctrl |= SDHCI_CTRL_LED; 366 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 367 } 368 369 static void __sdhci_led_deactivate(struct sdhci_host *host) 370 { 371 u8 ctrl; 372 373 if (host->quirks & SDHCI_QUIRK_NO_LED) 374 return; 375 376 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 377 ctrl &= ~SDHCI_CTRL_LED; 378 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 379 } 380 381 #if IS_REACHABLE(CONFIG_LEDS_CLASS) 382 static void sdhci_led_control(struct led_classdev *led, 383 enum led_brightness brightness) 384 { 385 struct sdhci_host *host = container_of(led, struct sdhci_host, led); 386 unsigned long flags; 387 388 spin_lock_irqsave(&host->lock, flags); 389 390 if (host->runtime_suspended) 391 goto out; 392 393 if (brightness == LED_OFF) 394 __sdhci_led_deactivate(host); 395 else 396 __sdhci_led_activate(host); 397 out: 398 spin_unlock_irqrestore(&host->lock, flags); 399 } 400 401 static int sdhci_led_register(struct sdhci_host *host) 402 { 403 struct mmc_host *mmc = host->mmc; 404 405 if (host->quirks & SDHCI_QUIRK_NO_LED) 406 return 0; 407 408 snprintf(host->led_name, sizeof(host->led_name), 409 "%s::", mmc_hostname(mmc)); 410 411 host->led.name = host->led_name; 412 host->led.brightness = LED_OFF; 413 host->led.default_trigger = mmc_hostname(mmc); 414 host->led.brightness_set = sdhci_led_control; 415 416 return led_classdev_register(mmc_dev(mmc), &host->led); 417 } 418 419 static void sdhci_led_unregister(struct sdhci_host *host) 420 { 421 if (host->quirks & SDHCI_QUIRK_NO_LED) 422 return; 423 424 led_classdev_unregister(&host->led); 425 } 426 427 static inline void sdhci_led_activate(struct sdhci_host *host) 428 { 429 } 430 431 static inline void sdhci_led_deactivate(struct sdhci_host *host) 432 { 433 } 434 435 #else 436 437 static inline int sdhci_led_register(struct sdhci_host *host) 438 { 439 return 0; 440 } 441 442 static inline void sdhci_led_unregister(struct sdhci_host *host) 443 { 444 } 445 446 static inline void sdhci_led_activate(struct sdhci_host *host) 447 { 448 __sdhci_led_activate(host); 449 } 450 451 static inline void sdhci_led_deactivate(struct sdhci_host *host) 452 { 453 __sdhci_led_deactivate(host); 454 } 455 456 #endif 457 458 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq, 459 unsigned long timeout) 460 { 461 if (sdhci_data_line_cmd(mrq->cmd)) 462 mod_timer(&host->data_timer, timeout); 463 else 464 mod_timer(&host->timer, timeout); 465 } 466 467 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq) 468 { 469 if (sdhci_data_line_cmd(mrq->cmd)) 470 del_timer(&host->data_timer); 471 else 472 del_timer(&host->timer); 473 } 474 475 static inline bool sdhci_has_requests(struct sdhci_host *host) 476 { 477 return host->cmd || host->data_cmd; 478 } 479 480 /*****************************************************************************\ 481 * * 482 * Core functions * 483 * * 484 \*****************************************************************************/ 485 486 static void sdhci_read_block_pio(struct sdhci_host *host) 487 { 488 unsigned long flags; 489 size_t blksize, len, chunk; 490 u32 uninitialized_var(scratch); 491 u8 *buf; 492 493 DBG("PIO reading\n"); 494 495 blksize = 
host->data->blksz; 496 chunk = 0; 497 498 local_irq_save(flags); 499 500 while (blksize) { 501 BUG_ON(!sg_miter_next(&host->sg_miter)); 502 503 len = min(host->sg_miter.length, blksize); 504 505 blksize -= len; 506 host->sg_miter.consumed = len; 507 508 buf = host->sg_miter.addr; 509 510 while (len) { 511 if (chunk == 0) { 512 scratch = sdhci_readl(host, SDHCI_BUFFER); 513 chunk = 4; 514 } 515 516 *buf = scratch & 0xFF; 517 518 buf++; 519 scratch >>= 8; 520 chunk--; 521 len--; 522 } 523 } 524 525 sg_miter_stop(&host->sg_miter); 526 527 local_irq_restore(flags); 528 } 529 530 static void sdhci_write_block_pio(struct sdhci_host *host) 531 { 532 unsigned long flags; 533 size_t blksize, len, chunk; 534 u32 scratch; 535 u8 *buf; 536 537 DBG("PIO writing\n"); 538 539 blksize = host->data->blksz; 540 chunk = 0; 541 scratch = 0; 542 543 local_irq_save(flags); 544 545 while (blksize) { 546 BUG_ON(!sg_miter_next(&host->sg_miter)); 547 548 len = min(host->sg_miter.length, blksize); 549 550 blksize -= len; 551 host->sg_miter.consumed = len; 552 553 buf = host->sg_miter.addr; 554 555 while (len) { 556 scratch |= (u32)*buf << (chunk * 8); 557 558 buf++; 559 chunk++; 560 len--; 561 562 if ((chunk == 4) || ((len == 0) && (blksize == 0))) { 563 sdhci_writel(host, scratch, SDHCI_BUFFER); 564 chunk = 0; 565 scratch = 0; 566 } 567 } 568 } 569 570 sg_miter_stop(&host->sg_miter); 571 572 local_irq_restore(flags); 573 } 574 575 static void sdhci_transfer_pio(struct sdhci_host *host) 576 { 577 u32 mask; 578 579 if (host->blocks == 0) 580 return; 581 582 if (host->data->flags & MMC_DATA_READ) 583 mask = SDHCI_DATA_AVAILABLE; 584 else 585 mask = SDHCI_SPACE_AVAILABLE; 586 587 /* 588 * Some controllers (JMicron JMB38x) mess up the buffer bits 589 * for transfers < 4 bytes. As long as it is just one block, 590 * we can ignore the bits. 591 */ 592 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && 593 (host->data->blocks == 1)) 594 mask = ~0; 595 596 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 597 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) 598 udelay(100); 599 600 if (host->data->flags & MMC_DATA_READ) 601 sdhci_read_block_pio(host); 602 else 603 sdhci_write_block_pio(host); 604 605 host->blocks--; 606 if (host->blocks == 0) 607 break; 608 } 609 610 DBG("PIO transfer complete.\n"); 611 } 612 613 static int sdhci_pre_dma_transfer(struct sdhci_host *host, 614 struct mmc_data *data, int cookie) 615 { 616 int sg_count; 617 618 /* 619 * If the data buffers are already mapped, return the previous 620 * dma_map_sg() result. 
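	 * COOKIE_PRE_MAPPED means the mapping was already done in the
	 * ->pre_req() path, so it must not be repeated here.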
621 */ 622 if (data->host_cookie == COOKIE_PRE_MAPPED) 623 return data->sg_count; 624 625 /* Bounce write requests to the bounce buffer */ 626 if (host->bounce_buffer) { 627 unsigned int length = data->blksz * data->blocks; 628 629 if (length > host->bounce_buffer_size) { 630 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n", 631 mmc_hostname(host->mmc), length, 632 host->bounce_buffer_size); 633 return -EIO; 634 } 635 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { 636 /* Copy the data to the bounce buffer */ 637 sg_copy_to_buffer(data->sg, data->sg_len, 638 host->bounce_buffer, 639 length); 640 } 641 /* Switch ownership to the DMA */ 642 dma_sync_single_for_device(host->mmc->parent, 643 host->bounce_addr, 644 host->bounce_buffer_size, 645 mmc_get_dma_dir(data)); 646 /* Just a dummy value */ 647 sg_count = 1; 648 } else { 649 /* Just access the data directly from memory */ 650 sg_count = dma_map_sg(mmc_dev(host->mmc), 651 data->sg, data->sg_len, 652 mmc_get_dma_dir(data)); 653 } 654 655 if (sg_count == 0) 656 return -ENOSPC; 657 658 data->sg_count = sg_count; 659 data->host_cookie = cookie; 660 661 return sg_count; 662 } 663 664 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) 665 { 666 local_irq_save(*flags); 667 return kmap_atomic(sg_page(sg)) + sg->offset; 668 } 669 670 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) 671 { 672 kunmap_atomic(buffer); 673 local_irq_restore(*flags); 674 } 675 676 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, 677 dma_addr_t addr, int len, unsigned int cmd) 678 { 679 struct sdhci_adma2_64_desc *dma_desc = *desc; 680 681 /* 32-bit and 64-bit descriptors have these members in same position */ 682 dma_desc->cmd = cpu_to_le16(cmd); 683 dma_desc->len = cpu_to_le16(len); 684 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr)); 685 686 if (host->flags & SDHCI_USE_64_BIT_DMA) 687 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr)); 688 689 *desc += host->desc_sz; 690 } 691 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc); 692 693 static inline void __sdhci_adma_write_desc(struct sdhci_host *host, 694 void **desc, dma_addr_t addr, 695 int len, unsigned int cmd) 696 { 697 if (host->ops->adma_write_desc) 698 host->ops->adma_write_desc(host, desc, addr, len, cmd); 699 else 700 sdhci_adma_write_desc(host, desc, addr, len, cmd); 701 } 702 703 static void sdhci_adma_mark_end(void *desc) 704 { 705 struct sdhci_adma2_64_desc *dma_desc = desc; 706 707 /* 32-bit and 64-bit descriptors have 'cmd' in same position */ 708 dma_desc->cmd |= cpu_to_le16(ADMA2_END); 709 } 710 711 static void sdhci_adma_table_pre(struct sdhci_host *host, 712 struct mmc_data *data, int sg_count) 713 { 714 struct scatterlist *sg; 715 unsigned long flags; 716 dma_addr_t addr, align_addr; 717 void *desc, *align; 718 char *buffer; 719 int len, offset, i; 720 721 /* 722 * The spec does not specify endianness of descriptor table. 723 * We currently guess that it is LE. 724 */ 725 726 host->sg_count = sg_count; 727 728 desc = host->adma_table; 729 align = host->align_buffer; 730 731 align_addr = host->align_addr; 732 733 for_each_sg(data->sg, sg, host->sg_count, i) { 734 addr = sg_dma_address(sg); 735 len = sg_dma_len(sg); 736 737 /* 738 * The SDHCI specification states that ADMA addresses must 739 * be 32-bit aligned. If they aren't, then we use a bounce 740 * buffer for the (up to three) bytes that screw up the 741 * alignment. 
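		 * For example, a buffer starting at ...0x1001 has its first
		 * three bytes handled via the align buffer so that the
		 * descriptor address stays 4-byte aligned.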
742 */ 743 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) & 744 SDHCI_ADMA2_MASK; 745 if (offset) { 746 if (data->flags & MMC_DATA_WRITE) { 747 buffer = sdhci_kmap_atomic(sg, &flags); 748 memcpy(align, buffer, offset); 749 sdhci_kunmap_atomic(buffer, &flags); 750 } 751 752 /* tran, valid */ 753 __sdhci_adma_write_desc(host, &desc, align_addr, 754 offset, ADMA2_TRAN_VALID); 755 756 BUG_ON(offset > 65536); 757 758 align += SDHCI_ADMA2_ALIGN; 759 align_addr += SDHCI_ADMA2_ALIGN; 760 761 addr += offset; 762 len -= offset; 763 } 764 765 BUG_ON(len > 65536); 766 767 /* tran, valid */ 768 if (len) 769 __sdhci_adma_write_desc(host, &desc, addr, len, 770 ADMA2_TRAN_VALID); 771 772 /* 773 * If this triggers then we have a calculation bug 774 * somewhere. :/ 775 */ 776 WARN_ON((desc - host->adma_table) >= host->adma_table_sz); 777 } 778 779 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { 780 /* Mark the last descriptor as the terminating descriptor */ 781 if (desc != host->adma_table) { 782 desc -= host->desc_sz; 783 sdhci_adma_mark_end(desc); 784 } 785 } else { 786 /* Add a terminating entry - nop, end, valid */ 787 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID); 788 } 789 } 790 791 static void sdhci_adma_table_post(struct sdhci_host *host, 792 struct mmc_data *data) 793 { 794 struct scatterlist *sg; 795 int i, size; 796 void *align; 797 char *buffer; 798 unsigned long flags; 799 800 if (data->flags & MMC_DATA_READ) { 801 bool has_unaligned = false; 802 803 /* Do a quick scan of the SG list for any unaligned mappings */ 804 for_each_sg(data->sg, sg, host->sg_count, i) 805 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 806 has_unaligned = true; 807 break; 808 } 809 810 if (has_unaligned) { 811 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, 812 data->sg_len, DMA_FROM_DEVICE); 813 814 align = host->align_buffer; 815 816 for_each_sg(data->sg, sg, host->sg_count, i) { 817 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 818 size = SDHCI_ADMA2_ALIGN - 819 (sg_dma_address(sg) & SDHCI_ADMA2_MASK); 820 821 buffer = sdhci_kmap_atomic(sg, &flags); 822 memcpy(buffer, align, size); 823 sdhci_kunmap_atomic(buffer, &flags); 824 825 align += SDHCI_ADMA2_ALIGN; 826 } 827 } 828 } 829 } 830 } 831 832 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr) 833 { 834 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS); 835 if (host->flags & SDHCI_USE_64_BIT_DMA) 836 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI); 837 } 838 839 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host) 840 { 841 if (host->bounce_buffer) 842 return host->bounce_addr; 843 else 844 return sg_dma_address(host->data->sg); 845 } 846 847 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr) 848 { 849 if (host->v4_mode) 850 sdhci_set_adma_addr(host, addr); 851 else 852 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS); 853 } 854 855 static unsigned int sdhci_target_timeout(struct sdhci_host *host, 856 struct mmc_command *cmd, 857 struct mmc_data *data) 858 { 859 unsigned int target_timeout; 860 861 /* timeout in us */ 862 if (!data) { 863 target_timeout = cmd->busy_timeout * 1000; 864 } else { 865 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000); 866 if (host->clock && data->timeout_clks) { 867 unsigned long long val; 868 869 /* 870 * data->timeout_clks is in units of clock cycles. 871 * host->clock is in Hz. target_timeout is in us. 872 * Hence, us = 1000000 * cycles / Hz. Round up. 
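			 * For example, 1000 timeout clocks at a 50 MHz bus
			 * clock work out to 1000000 * 1000 / 50000000 = 20 us.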
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = host->mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data;
	unsigned target_timeout, current_timeout;

	*too_big = true;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified command, assume max */
	if (cmd == NULL)
		return 0xE;

	data = cmd->data;
	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
950 * The first step is the minimum timeout, which will have a 951 * minimum resolution of 6 bits: 952 * (1) 2^13*1000 > 2^22, 953 * (2) host->timeout_clk < 2^16 954 * => 955 * (1) / (2) > 2^6 956 */ 957 count = 0; 958 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 959 while (current_timeout < target_timeout) { 960 count++; 961 current_timeout <<= 1; 962 if (count >= 0xF) 963 break; 964 } 965 966 if (count >= 0xF) { 967 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT)) 968 DBG("Too large timeout 0x%x requested for CMD%d!\n", 969 count, cmd->opcode); 970 count = 0xE; 971 } else { 972 *too_big = false; 973 } 974 975 return count; 976 } 977 978 static void sdhci_set_transfer_irqs(struct sdhci_host *host) 979 { 980 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; 981 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; 982 983 if (host->flags & SDHCI_REQ_USE_DMA) 984 host->ier = (host->ier & ~pio_irqs) | dma_irqs; 985 else 986 host->ier = (host->ier & ~dma_irqs) | pio_irqs; 987 988 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12)) 989 host->ier |= SDHCI_INT_AUTO_CMD_ERR; 990 else 991 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR; 992 993 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 994 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 995 } 996 997 void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) 998 { 999 if (enable) 1000 host->ier |= SDHCI_INT_DATA_TIMEOUT; 1001 else 1002 host->ier &= ~SDHCI_INT_DATA_TIMEOUT; 1003 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1004 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1005 } 1006 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); 1007 1008 void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1009 { 1010 bool too_big = false; 1011 u8 count = sdhci_calc_timeout(host, cmd, &too_big); 1012 1013 if (too_big && 1014 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { 1015 sdhci_calc_sw_timeout(host, cmd); 1016 sdhci_set_data_timeout_irq(host, false); 1017 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { 1018 sdhci_set_data_timeout_irq(host, true); 1019 } 1020 1021 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); 1022 } 1023 EXPORT_SYMBOL_GPL(__sdhci_set_timeout); 1024 1025 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1026 { 1027 if (host->ops->set_timeout) 1028 host->ops->set_timeout(host, cmd); 1029 else 1030 __sdhci_set_timeout(host, cmd); 1031 } 1032 1033 static void sdhci_initialize_data(struct sdhci_host *host, 1034 struct mmc_data *data) 1035 { 1036 WARN_ON(host->data); 1037 1038 /* Sanity checks */ 1039 BUG_ON(data->blksz * data->blocks > 524288); 1040 BUG_ON(data->blksz > host->mmc->max_blk_size); 1041 BUG_ON(data->blocks > 65535); 1042 1043 host->data = data; 1044 host->data_early = 0; 1045 host->data->bytes_xfered = 0; 1046 } 1047 1048 static inline void sdhci_set_block_info(struct sdhci_host *host, 1049 struct mmc_data *data) 1050 { 1051 /* Set the DMA boundary value and block size */ 1052 sdhci_writew(host, 1053 SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), 1054 SDHCI_BLOCK_SIZE); 1055 /* 1056 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count 1057 * can be supported, in that case 16-bit block count register must be 0. 
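	 * Note that SDHCI_32BIT_BLK_CNT shares the offset of the old SDMA
	 * system address register; v4 mode frees that register up because
	 * SDMA addresses then go through the ADMA address registers (see
	 * sdhci_set_sdma_addr()).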
1058 */ 1059 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1060 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { 1061 if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) 1062 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); 1063 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); 1064 } else { 1065 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); 1066 } 1067 } 1068 1069 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) 1070 { 1071 struct mmc_data *data = cmd->data; 1072 1073 sdhci_initialize_data(host, data); 1074 1075 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 1076 struct scatterlist *sg; 1077 unsigned int length_mask, offset_mask; 1078 int i; 1079 1080 host->flags |= SDHCI_REQ_USE_DMA; 1081 1082 /* 1083 * FIXME: This doesn't account for merging when mapping the 1084 * scatterlist. 1085 * 1086 * The assumption here being that alignment and lengths are 1087 * the same after DMA mapping to device address space. 1088 */ 1089 length_mask = 0; 1090 offset_mask = 0; 1091 if (host->flags & SDHCI_USE_ADMA) { 1092 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) { 1093 length_mask = 3; 1094 /* 1095 * As we use up to 3 byte chunks to work 1096 * around alignment problems, we need to 1097 * check the offset as well. 1098 */ 1099 offset_mask = 3; 1100 } 1101 } else { 1102 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) 1103 length_mask = 3; 1104 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) 1105 offset_mask = 3; 1106 } 1107 1108 if (unlikely(length_mask | offset_mask)) { 1109 for_each_sg(data->sg, sg, data->sg_len, i) { 1110 if (sg->length & length_mask) { 1111 DBG("Reverting to PIO because of transfer size (%d)\n", 1112 sg->length); 1113 host->flags &= ~SDHCI_REQ_USE_DMA; 1114 break; 1115 } 1116 if (sg->offset & offset_mask) { 1117 DBG("Reverting to PIO because of bad alignment\n"); 1118 host->flags &= ~SDHCI_REQ_USE_DMA; 1119 break; 1120 } 1121 } 1122 } 1123 } 1124 1125 if (host->flags & SDHCI_REQ_USE_DMA) { 1126 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1127 1128 if (sg_cnt <= 0) { 1129 /* 1130 * This only happens when someone fed 1131 * us an invalid request. 
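			 * Fall back to PIO in that case rather than failing
			 * the request outright.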
1132 */ 1133 WARN_ON(1); 1134 host->flags &= ~SDHCI_REQ_USE_DMA; 1135 } else if (host->flags & SDHCI_USE_ADMA) { 1136 sdhci_adma_table_pre(host, data, sg_cnt); 1137 sdhci_set_adma_addr(host, host->adma_addr); 1138 } else { 1139 WARN_ON(sg_cnt != 1); 1140 sdhci_set_sdma_addr(host, sdhci_sdma_address(host)); 1141 } 1142 } 1143 1144 sdhci_config_dma(host); 1145 1146 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 1147 int flags; 1148 1149 flags = SG_MITER_ATOMIC; 1150 if (host->data->flags & MMC_DATA_READ) 1151 flags |= SG_MITER_TO_SG; 1152 else 1153 flags |= SG_MITER_FROM_SG; 1154 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1155 host->blocks = data->blocks; 1156 } 1157 1158 sdhci_set_transfer_irqs(host); 1159 1160 sdhci_set_block_info(host, data); 1161 } 1162 1163 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) 1164 1165 static int sdhci_external_dma_init(struct sdhci_host *host) 1166 { 1167 int ret = 0; 1168 struct mmc_host *mmc = host->mmc; 1169 1170 host->tx_chan = dma_request_chan(mmc->parent, "tx"); 1171 if (IS_ERR(host->tx_chan)) { 1172 ret = PTR_ERR(host->tx_chan); 1173 if (ret != -EPROBE_DEFER) 1174 pr_warn("Failed to request TX DMA channel.\n"); 1175 host->tx_chan = NULL; 1176 return ret; 1177 } 1178 1179 host->rx_chan = dma_request_chan(mmc->parent, "rx"); 1180 if (IS_ERR(host->rx_chan)) { 1181 if (host->tx_chan) { 1182 dma_release_channel(host->tx_chan); 1183 host->tx_chan = NULL; 1184 } 1185 1186 ret = PTR_ERR(host->rx_chan); 1187 if (ret != -EPROBE_DEFER) 1188 pr_warn("Failed to request RX DMA channel.\n"); 1189 host->rx_chan = NULL; 1190 } 1191 1192 return ret; 1193 } 1194 1195 static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1196 struct mmc_data *data) 1197 { 1198 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; 1199 } 1200 1201 static int sdhci_external_dma_setup(struct sdhci_host *host, 1202 struct mmc_command *cmd) 1203 { 1204 int ret, i; 1205 enum dma_transfer_direction dir; 1206 struct dma_async_tx_descriptor *desc; 1207 struct mmc_data *data = cmd->data; 1208 struct dma_chan *chan; 1209 struct dma_slave_config cfg; 1210 dma_cookie_t cookie; 1211 int sg_cnt; 1212 1213 if (!host->mapbase) 1214 return -EINVAL; 1215 1216 cfg.src_addr = host->mapbase + SDHCI_BUFFER; 1217 cfg.dst_addr = host->mapbase + SDHCI_BUFFER; 1218 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1219 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1220 cfg.src_maxburst = data->blksz / 4; 1221 cfg.dst_maxburst = data->blksz / 4; 1222 1223 /* Sanity check: all the SG entries must be aligned by block size. */ 1224 for (i = 0; i < data->sg_len; i++) { 1225 if ((data->sg + i)->length % data->blksz) 1226 return -EINVAL; 1227 } 1228 1229 chan = sdhci_external_dma_channel(host, data); 1230 1231 ret = dmaengine_slave_config(chan, &cfg); 1232 if (ret) 1233 return ret; 1234 1235 sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1236 if (sg_cnt <= 0) 1237 return -EINVAL; 1238 1239 dir = data->flags & MMC_DATA_WRITE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; 1240 desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir, 1241 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1242 if (!desc) 1243 return -EINVAL; 1244 1245 desc->callback = NULL; 1246 desc->callback_param = NULL; 1247 1248 cookie = dmaengine_submit(desc); 1249 if (dma_submit_error(cookie)) 1250 ret = cookie; 1251 1252 return ret; 1253 } 1254 1255 static void sdhci_external_dma_release(struct sdhci_host *host) 1256 { 1257 if (host->tx_chan) { 1258 dma_release_channel(host->tx_chan); 1259 host->tx_chan = NULL; 1260 } 1261 1262 if (host->rx_chan) { 1263 dma_release_channel(host->rx_chan); 1264 host->rx_chan = NULL; 1265 } 1266 1267 sdhci_switch_external_dma(host, false); 1268 } 1269 1270 static void __sdhci_external_dma_prepare_data(struct sdhci_host *host, 1271 struct mmc_command *cmd) 1272 { 1273 struct mmc_data *data = cmd->data; 1274 1275 sdhci_initialize_data(host, data); 1276 1277 host->flags |= SDHCI_REQ_USE_DMA; 1278 sdhci_set_transfer_irqs(host); 1279 1280 sdhci_set_block_info(host, data); 1281 } 1282 1283 static void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1284 struct mmc_command *cmd) 1285 { 1286 if (!sdhci_external_dma_setup(host, cmd)) { 1287 __sdhci_external_dma_prepare_data(host, cmd); 1288 } else { 1289 sdhci_external_dma_release(host); 1290 pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n", 1291 mmc_hostname(host->mmc)); 1292 sdhci_prepare_data(host, cmd); 1293 } 1294 } 1295 1296 static void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1297 struct mmc_command *cmd) 1298 { 1299 struct dma_chan *chan; 1300 1301 if (!cmd->data) 1302 return; 1303 1304 chan = sdhci_external_dma_channel(host, cmd->data); 1305 if (chan) 1306 dma_async_issue_pending(chan); 1307 } 1308 1309 #else 1310 1311 static inline int sdhci_external_dma_init(struct sdhci_host *host) 1312 { 1313 return -EOPNOTSUPP; 1314 } 1315 1316 static inline void sdhci_external_dma_release(struct sdhci_host *host) 1317 { 1318 } 1319 1320 static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1321 struct mmc_command *cmd) 1322 { 1323 /* This should never happen */ 1324 WARN_ON_ONCE(1); 1325 } 1326 1327 static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1328 struct mmc_command *cmd) 1329 { 1330 } 1331 1332 static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1333 struct mmc_data *data) 1334 { 1335 return NULL; 1336 } 1337 1338 #endif 1339 1340 void sdhci_switch_external_dma(struct sdhci_host *host, bool en) 1341 { 1342 host->use_external_dma = en; 1343 } 1344 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma); 1345 1346 static inline bool sdhci_auto_cmd12(struct sdhci_host *host, 1347 struct mmc_request *mrq) 1348 { 1349 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) && 1350 !mrq->cap_cmd_during_tfr; 1351 } 1352 1353 static inline void sdhci_auto_cmd_select(struct sdhci_host *host, 1354 struct mmc_command *cmd, 1355 u16 *mode) 1356 { 1357 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) && 1358 (cmd->opcode != SD_IO_RW_EXTENDED); 1359 bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23); 1360 u16 ctrl2; 1361 1362 /* 1363 * In case of Version 4.10 or later, use of 'Auto CMD Auto 1364 * Select' is recommended rather than use of 'Auto CMD12 1365 * Enable' or 'Auto CMD23 Enable'. 
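	 * With Auto CMD Auto Select the controller chooses between Auto
	 * CMD23 and Auto CMD12 itself, based on the SDHCI_CMD23_ENABLE bit
	 * programmed below.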
1366 */ 1367 if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) { 1368 *mode |= SDHCI_TRNS_AUTO_SEL; 1369 1370 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1371 if (use_cmd23) 1372 ctrl2 |= SDHCI_CMD23_ENABLE; 1373 else 1374 ctrl2 &= ~SDHCI_CMD23_ENABLE; 1375 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 1376 1377 return; 1378 } 1379 1380 /* 1381 * If we are sending CMD23, CMD12 never gets sent 1382 * on successful completion (so no Auto-CMD12). 1383 */ 1384 if (use_cmd12) 1385 *mode |= SDHCI_TRNS_AUTO_CMD12; 1386 else if (use_cmd23) 1387 *mode |= SDHCI_TRNS_AUTO_CMD23; 1388 } 1389 1390 static void sdhci_set_transfer_mode(struct sdhci_host *host, 1391 struct mmc_command *cmd) 1392 { 1393 u16 mode = 0; 1394 struct mmc_data *data = cmd->data; 1395 1396 if (data == NULL) { 1397 if (host->quirks2 & 1398 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) { 1399 /* must not clear SDHCI_TRANSFER_MODE when tuning */ 1400 if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) 1401 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE); 1402 } else { 1403 /* clear Auto CMD settings for no data CMDs */ 1404 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); 1405 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 | 1406 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE); 1407 } 1408 return; 1409 } 1410 1411 WARN_ON(!host->data); 1412 1413 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE)) 1414 mode = SDHCI_TRNS_BLK_CNT_EN; 1415 1416 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) { 1417 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI; 1418 sdhci_auto_cmd_select(host, cmd, &mode); 1419 if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) 1420 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2); 1421 } 1422 1423 if (data->flags & MMC_DATA_READ) 1424 mode |= SDHCI_TRNS_READ; 1425 if (host->flags & SDHCI_REQ_USE_DMA) 1426 mode |= SDHCI_TRNS_DMA; 1427 1428 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); 1429 } 1430 1431 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) 1432 { 1433 return (!(host->flags & SDHCI_DEVICE_DEAD) && 1434 ((mrq->cmd && mrq->cmd->error) || 1435 (mrq->sbc && mrq->sbc->error) || 1436 (mrq->data && mrq->data->stop && mrq->data->stop->error) || 1437 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); 1438 } 1439 1440 static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq) 1441 { 1442 int i; 1443 1444 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1445 if (host->mrqs_done[i] == mrq) { 1446 WARN_ON(1); 1447 return; 1448 } 1449 } 1450 1451 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1452 if (!host->mrqs_done[i]) { 1453 host->mrqs_done[i] = mrq; 1454 break; 1455 } 1456 } 1457 1458 WARN_ON(i >= SDHCI_MAX_MRQS); 1459 } 1460 1461 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1462 { 1463 if (host->cmd && host->cmd->mrq == mrq) 1464 host->cmd = NULL; 1465 1466 if (host->data_cmd && host->data_cmd->mrq == mrq) 1467 host->data_cmd = NULL; 1468 1469 if (host->data && host->data->mrq == mrq) 1470 host->data = NULL; 1471 1472 if (sdhci_needs_reset(host, mrq)) 1473 host->pending_reset = true; 1474 1475 sdhci_set_mrq_done(host, mrq); 1476 1477 sdhci_del_timer(host, mrq); 1478 1479 if (!sdhci_has_requests(host)) 1480 sdhci_led_deactivate(host); 1481 } 1482 1483 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1484 { 1485 __sdhci_finish_mrq(host, mrq); 1486 1487 queue_work(host->complete_wq, &host->complete_work); 1488 } 1489 1490 static void sdhci_finish_data(struct sdhci_host *host) 1491 { 1492 
struct mmc_command *data_cmd = host->data_cmd; 1493 struct mmc_data *data = host->data; 1494 1495 host->data = NULL; 1496 host->data_cmd = NULL; 1497 1498 /* 1499 * The controller needs a reset of internal state machines upon error 1500 * conditions. 1501 */ 1502 if (data->error) { 1503 if (!host->cmd || host->cmd == data_cmd) 1504 sdhci_do_reset(host, SDHCI_RESET_CMD); 1505 sdhci_do_reset(host, SDHCI_RESET_DATA); 1506 } 1507 1508 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) == 1509 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) 1510 sdhci_adma_table_post(host, data); 1511 1512 /* 1513 * The specification states that the block count register must 1514 * be updated, but it does not specify at what point in the 1515 * data flow. That makes the register entirely useless to read 1516 * back so we have to assume that nothing made it to the card 1517 * in the event of an error. 1518 */ 1519 if (data->error) 1520 data->bytes_xfered = 0; 1521 else 1522 data->bytes_xfered = data->blksz * data->blocks; 1523 1524 /* 1525 * Need to send CMD12 if - 1526 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23) 1527 * b) error in multiblock transfer 1528 */ 1529 if (data->stop && 1530 ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) || 1531 data->error)) { 1532 /* 1533 * 'cap_cmd_during_tfr' request must not use the command line 1534 * after mmc_command_done() has been called. It is upper layer's 1535 * responsibility to send the stop command if required. 1536 */ 1537 if (data->mrq->cap_cmd_during_tfr) { 1538 __sdhci_finish_mrq(host, data->mrq); 1539 } else { 1540 /* Avoid triggering warning in sdhci_send_command() */ 1541 host->cmd = NULL; 1542 sdhci_send_command(host, data->stop); 1543 } 1544 } else { 1545 __sdhci_finish_mrq(host, data->mrq); 1546 } 1547 } 1548 1549 void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) 1550 { 1551 int flags; 1552 u32 mask; 1553 unsigned long timeout; 1554 1555 WARN_ON(host->cmd); 1556 1557 /* Initially, a command has no error */ 1558 cmd->error = 0; 1559 1560 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && 1561 cmd->opcode == MMC_STOP_TRANSMISSION) 1562 cmd->flags |= MMC_RSP_BUSY; 1563 1564 /* Wait max 10 ms */ 1565 timeout = 10; 1566 1567 mask = SDHCI_CMD_INHIBIT; 1568 if (sdhci_data_line_cmd(cmd)) 1569 mask |= SDHCI_DATA_INHIBIT; 1570 1571 /* We shouldn't wait for data inihibit for stop commands, even 1572 though they might use busy signaling */ 1573 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop)) 1574 mask &= ~SDHCI_DATA_INHIBIT; 1575 1576 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 1577 if (timeout == 0) { 1578 pr_err("%s: Controller never released inhibit bit(s).\n", 1579 mmc_hostname(host->mmc)); 1580 sdhci_dumpregs(host); 1581 cmd->error = -EIO; 1582 sdhci_finish_mrq(host, cmd->mrq); 1583 return; 1584 } 1585 timeout--; 1586 mdelay(1); 1587 } 1588 1589 host->cmd = cmd; 1590 host->data_timeout = 0; 1591 if (sdhci_data_line_cmd(cmd)) { 1592 WARN_ON(host->data_cmd); 1593 host->data_cmd = cmd; 1594 sdhci_set_timeout(host, cmd); 1595 } 1596 1597 if (cmd->data) { 1598 if (host->use_external_dma) 1599 sdhci_external_dma_prepare_data(host, cmd); 1600 else 1601 sdhci_prepare_data(host, cmd); 1602 } 1603 1604 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); 1605 1606 sdhci_set_transfer_mode(host, cmd); 1607 1608 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 1609 pr_err("%s: Unsupported response type!\n", 1610 mmc_hostname(host->mmc)); 1611 cmd->error = -EINVAL; 1612 sdhci_finish_mrq(host, 
				 cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	if (host->use_external_dma)
		sdhci_external_dma_pre_transfer(host, cmd);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 * feature so there might be some problems with older
	 * controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command.
*/ 1714 if (host->data && host->data_early) 1715 sdhci_finish_data(host); 1716 1717 if (!cmd->data) 1718 __sdhci_finish_mrq(host, cmd->mrq); 1719 } 1720 } 1721 1722 static u16 sdhci_get_preset_value(struct sdhci_host *host) 1723 { 1724 u16 preset = 0; 1725 1726 switch (host->timing) { 1727 case MMC_TIMING_UHS_SDR12: 1728 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1729 break; 1730 case MMC_TIMING_UHS_SDR25: 1731 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); 1732 break; 1733 case MMC_TIMING_UHS_SDR50: 1734 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); 1735 break; 1736 case MMC_TIMING_UHS_SDR104: 1737 case MMC_TIMING_MMC_HS200: 1738 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); 1739 break; 1740 case MMC_TIMING_UHS_DDR50: 1741 case MMC_TIMING_MMC_DDR52: 1742 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); 1743 break; 1744 case MMC_TIMING_MMC_HS400: 1745 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400); 1746 break; 1747 default: 1748 pr_warn("%s: Invalid UHS-I mode selected\n", 1749 mmc_hostname(host->mmc)); 1750 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1751 break; 1752 } 1753 return preset; 1754 } 1755 1756 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock, 1757 unsigned int *actual_clock) 1758 { 1759 int div = 0; /* Initialized for compiler warning */ 1760 int real_div = div, clk_mul = 1; 1761 u16 clk = 0; 1762 bool switch_base_clk = false; 1763 1764 if (host->version >= SDHCI_SPEC_300) { 1765 if (host->preset_enabled) { 1766 u16 pre_val; 1767 1768 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1769 pre_val = sdhci_get_preset_value(host); 1770 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val); 1771 if (host->clk_mul && 1772 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) { 1773 clk = SDHCI_PROG_CLOCK_MODE; 1774 real_div = div + 1; 1775 clk_mul = host->clk_mul; 1776 } else { 1777 real_div = max_t(int, 1, div << 1); 1778 } 1779 goto clock_set; 1780 } 1781 1782 /* 1783 * Check if the Host Controller supports Programmable Clock 1784 * Mode. 1785 */ 1786 if (host->clk_mul) { 1787 for (div = 1; div <= 1024; div++) { 1788 if ((host->max_clk * host->clk_mul / div) 1789 <= clock) 1790 break; 1791 } 1792 if ((host->max_clk * host->clk_mul / div) <= clock) { 1793 /* 1794 * Set Programmable Clock Mode in the Clock 1795 * Control register. 1796 */ 1797 clk = SDHCI_PROG_CLOCK_MODE; 1798 real_div = div; 1799 clk_mul = host->clk_mul; 1800 div--; 1801 } else { 1802 /* 1803 * Divisor can be too small to reach clock 1804 * speed requirement. Then use the base clock. 1805 */ 1806 switch_base_clk = true; 1807 } 1808 } 1809 1810 if (!host->clk_mul || switch_base_clk) { 1811 /* Version 3.00 divisors must be a multiple of 2. */ 1812 if (host->max_clk <= clock) 1813 div = 1; 1814 else { 1815 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; 1816 div += 2) { 1817 if ((host->max_clk / div) <= clock) 1818 break; 1819 } 1820 } 1821 real_div = div; 1822 div >>= 1; 1823 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN) 1824 && !div && host->max_clk <= 25000000) 1825 div = 1; 1826 } 1827 } else { 1828 /* Version 2.00 divisors must be a power of 2. 
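		 * For example, with a 50 MHz base clock and a 400 kHz target,
		 * the loop below settles on a divisor of 128 (~390 kHz) and
		 * the value written to the divider field is 128 >> 1 = 64.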
*/ 1829 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) { 1830 if ((host->max_clk / div) <= clock) 1831 break; 1832 } 1833 real_div = div; 1834 div >>= 1; 1835 } 1836 1837 clock_set: 1838 if (real_div) 1839 *actual_clock = (host->max_clk * clk_mul) / real_div; 1840 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; 1841 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) 1842 << SDHCI_DIVIDER_HI_SHIFT; 1843 1844 return clk; 1845 } 1846 EXPORT_SYMBOL_GPL(sdhci_calc_clk); 1847 1848 void sdhci_enable_clk(struct sdhci_host *host, u16 clk) 1849 { 1850 ktime_t timeout; 1851 1852 clk |= SDHCI_CLOCK_INT_EN; 1853 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1854 1855 /* Wait max 150 ms */ 1856 timeout = ktime_add_ms(ktime_get(), 150); 1857 while (1) { 1858 bool timedout = ktime_after(ktime_get(), timeout); 1859 1860 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1861 if (clk & SDHCI_CLOCK_INT_STABLE) 1862 break; 1863 if (timedout) { 1864 pr_err("%s: Internal clock never stabilised.\n", 1865 mmc_hostname(host->mmc)); 1866 sdhci_dumpregs(host); 1867 return; 1868 } 1869 udelay(10); 1870 } 1871 1872 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) { 1873 clk |= SDHCI_CLOCK_PLL_EN; 1874 clk &= ~SDHCI_CLOCK_INT_STABLE; 1875 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1876 1877 /* Wait max 150 ms */ 1878 timeout = ktime_add_ms(ktime_get(), 150); 1879 while (1) { 1880 bool timedout = ktime_after(ktime_get(), timeout); 1881 1882 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1883 if (clk & SDHCI_CLOCK_INT_STABLE) 1884 break; 1885 if (timedout) { 1886 pr_err("%s: PLL clock never stabilised.\n", 1887 mmc_hostname(host->mmc)); 1888 sdhci_dumpregs(host); 1889 return; 1890 } 1891 udelay(10); 1892 } 1893 } 1894 1895 clk |= SDHCI_CLOCK_CARD_EN; 1896 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1897 } 1898 EXPORT_SYMBOL_GPL(sdhci_enable_clk); 1899 1900 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 1901 { 1902 u16 clk; 1903 1904 host->mmc->actual_clock = 0; 1905 1906 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 1907 1908 if (clock == 0) 1909 return; 1910 1911 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); 1912 sdhci_enable_clk(host, clk); 1913 } 1914 EXPORT_SYMBOL_GPL(sdhci_set_clock); 1915 1916 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode, 1917 unsigned short vdd) 1918 { 1919 struct mmc_host *mmc = host->mmc; 1920 1921 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 1922 1923 if (mode != MMC_POWER_OFF) 1924 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); 1925 else 1926 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1927 } 1928 1929 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, 1930 unsigned short vdd) 1931 { 1932 u8 pwr = 0; 1933 1934 if (mode != MMC_POWER_OFF) { 1935 switch (1 << vdd) { 1936 case MMC_VDD_165_195: 1937 /* 1938 * Without a regulator, SDHCI does not support 2.0v 1939 * so we only get here if the driver deliberately 1940 * added the 2.0v range to ocr_avail. Map it to 1.8v 1941 * for the purpose of turning on the power. 
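			 * (This case label intentionally falls through to
			 * MMC_VDD_20_21 below.)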
			 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on the power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before they can
		 * apply the clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*
 * Some controllers need to configure a valid bus voltage on their power
 * register regardless of whether an external regulator is taking care of power
 * supply. This helper function takes care of it if set as the controller's
 * sdhci_ops.set_power callback.
2018 */ 2019 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host, 2020 unsigned char mode, 2021 unsigned short vdd) 2022 { 2023 if (!IS_ERR(host->mmc->supply.vmmc)) { 2024 struct mmc_host *mmc = host->mmc; 2025 2026 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2027 } 2028 sdhci_set_power_noreg(host, mode, vdd); 2029 } 2030 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage); 2031 2032 /*****************************************************************************\ 2033 * * 2034 * MMC callbacks * 2035 * * 2036 \*****************************************************************************/ 2037 2038 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 2039 { 2040 struct sdhci_host *host; 2041 int present; 2042 unsigned long flags; 2043 2044 host = mmc_priv(mmc); 2045 2046 /* Firstly check card presence */ 2047 present = mmc->ops->get_cd(mmc); 2048 2049 spin_lock_irqsave(&host->lock, flags); 2050 2051 sdhci_led_activate(host); 2052 2053 if (!present || host->flags & SDHCI_DEVICE_DEAD) { 2054 mrq->cmd->error = -ENOMEDIUM; 2055 sdhci_finish_mrq(host, mrq); 2056 } else { 2057 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) 2058 sdhci_send_command(host, mrq->sbc); 2059 else 2060 sdhci_send_command(host, mrq->cmd); 2061 } 2062 2063 spin_unlock_irqrestore(&host->lock, flags); 2064 } 2065 EXPORT_SYMBOL_GPL(sdhci_request); 2066 2067 void sdhci_set_bus_width(struct sdhci_host *host, int width) 2068 { 2069 u8 ctrl; 2070 2071 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2072 if (width == MMC_BUS_WIDTH_8) { 2073 ctrl &= ~SDHCI_CTRL_4BITBUS; 2074 ctrl |= SDHCI_CTRL_8BITBUS; 2075 } else { 2076 if (host->mmc->caps & MMC_CAP_8_BIT_DATA) 2077 ctrl &= ~SDHCI_CTRL_8BITBUS; 2078 if (width == MMC_BUS_WIDTH_4) 2079 ctrl |= SDHCI_CTRL_4BITBUS; 2080 else 2081 ctrl &= ~SDHCI_CTRL_4BITBUS; 2082 } 2083 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2084 } 2085 EXPORT_SYMBOL_GPL(sdhci_set_bus_width); 2086 2087 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 2088 { 2089 u16 ctrl_2; 2090 2091 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2092 /* Select Bus Speed Mode for host */ 2093 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 2094 if ((timing == MMC_TIMING_MMC_HS200) || 2095 (timing == MMC_TIMING_UHS_SDR104)) 2096 ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 2097 else if (timing == MMC_TIMING_UHS_SDR12) 2098 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 2099 else if (timing == MMC_TIMING_UHS_SDR25) 2100 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 2101 else if (timing == MMC_TIMING_UHS_SDR50) 2102 ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 2103 else if ((timing == MMC_TIMING_UHS_DDR50) || 2104 (timing == MMC_TIMING_MMC_DDR52)) 2105 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 2106 else if (timing == MMC_TIMING_MMC_HS400) 2107 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ 2108 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2109 } 2110 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 2111 2112 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 2113 { 2114 struct sdhci_host *host = mmc_priv(mmc); 2115 u8 ctrl; 2116 2117 if (ios->power_mode == MMC_POWER_UNDEFINED) 2118 return; 2119 2120 if (host->flags & SDHCI_DEVICE_DEAD) { 2121 if (!IS_ERR(mmc->supply.vmmc) && 2122 ios->power_mode == MMC_POWER_OFF) 2123 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 2124 return; 2125 } 2126 2127 /* 2128 * Reset the chip on each power off. 2129 * Should clear out any weird states. 
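	 * sdhci_reinit() below also restores the card-detect interrupt mask
	 * and may schedule a rescan if the card-detect bits changed.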
2130 */
2131 if (ios->power_mode == MMC_POWER_OFF) {
2132 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2133 sdhci_reinit(host);
2134 }
2135
2136 if (host->version >= SDHCI_SPEC_300 &&
2137 (ios->power_mode == MMC_POWER_UP) &&
2138 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
2139 sdhci_enable_preset_value(host, false);
2140
2141 if (!ios->clock || ios->clock != host->clock) {
2142 host->ops->set_clock(host, ios->clock);
2143 host->clock = ios->clock;
2144
2145 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
2146 host->clock) {
2147 host->timeout_clk = host->mmc->actual_clock ?
2148 host->mmc->actual_clock / 1000 :
2149 host->clock / 1000;
2150 host->mmc->max_busy_timeout =
2151 host->ops->get_max_timeout_count ?
2152 host->ops->get_max_timeout_count(host) :
2153 1 << 27;
2154 host->mmc->max_busy_timeout /= host->timeout_clk;
2155 }
2156 }
2157
2158 if (host->ops->set_power)
2159 host->ops->set_power(host, ios->power_mode, ios->vdd);
2160 else
2161 sdhci_set_power(host, ios->power_mode, ios->vdd);
2162
2163 if (host->ops->platform_send_init_74_clocks)
2164 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
2165
2166 host->ops->set_bus_width(host, ios->bus_width);
2167
2168 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2169
2170 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
2171 if (ios->timing == MMC_TIMING_SD_HS ||
2172 ios->timing == MMC_TIMING_MMC_HS ||
2173 ios->timing == MMC_TIMING_MMC_HS400 ||
2174 ios->timing == MMC_TIMING_MMC_HS200 ||
2175 ios->timing == MMC_TIMING_MMC_DDR52 ||
2176 ios->timing == MMC_TIMING_UHS_SDR50 ||
2177 ios->timing == MMC_TIMING_UHS_SDR104 ||
2178 ios->timing == MMC_TIMING_UHS_DDR50 ||
2179 ios->timing == MMC_TIMING_UHS_SDR25)
2180 ctrl |= SDHCI_CTRL_HISPD;
2181 else
2182 ctrl &= ~SDHCI_CTRL_HISPD;
2183 }
2184
2185 if (host->version >= SDHCI_SPEC_300) {
2186 u16 clk, ctrl_2;
2187
2188 if (!host->preset_enabled) {
2189 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2190 /*
2191 * We only need to set Driver Strength if the
2192 * preset value enable is not set.
2193 */
2194 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2195 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
2196 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
2197 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
2198 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
2199 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2200 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
2201 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
2202 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
2203 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
2204 else {
2205 pr_warn("%s: invalid driver type, default to driver type B\n",
2206 mmc_hostname(mmc));
2207 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2208 }
2209
2210 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2211 } else {
2212 /*
2213 * According to SDHC Spec v3.00, if the Preset Value
2214 * Enable in the Host Control 2 register is set, we
2215 * need to reset SD Clock Enable before changing High
2216 * Speed Enable to avoid generating clock glitches.
2217 */
2218
2219 /* Reset SD Clock Enable */
2220 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2221 clk &= ~SDHCI_CLOCK_CARD_EN;
2222 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2223
2224 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2225
2226 /* Re-enable SD Clock */
2227 host->ops->set_clock(host, host->clock);
2228 }
2229
2230 /* Reset SD Clock Enable */
2231 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2232 clk &= ~SDHCI_CLOCK_CARD_EN;
2233 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2234
2235 host->ops->set_uhs_signaling(host, ios->timing);
2236 host->timing = ios->timing;
2237
2238 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2239 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
2240 (ios->timing == MMC_TIMING_UHS_SDR25) ||
2241 (ios->timing == MMC_TIMING_UHS_SDR50) ||
2242 (ios->timing == MMC_TIMING_UHS_SDR104) ||
2243 (ios->timing == MMC_TIMING_UHS_DDR50) ||
2244 (ios->timing == MMC_TIMING_MMC_DDR52))) {
2245 u16 preset;
2246
2247 sdhci_enable_preset_value(host, true);
2248 preset = sdhci_get_preset_value(host);
2249 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
2250 preset);
2251 }
2252
2253 /* Re-enable SD Clock */
2254 host->ops->set_clock(host, host->clock);
2255 } else
2256 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2257
2258 /*
2259 * Some (ENE) controllers go apeshit on some ios operation,
2260 * signalling timeout and CRC errors even on CMD0. Resetting
2261 * it on each ios seems to solve the problem.
2262 */
2263 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2264 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2265 }
2266 EXPORT_SYMBOL_GPL(sdhci_set_ios);
2267
2268 static int sdhci_get_cd(struct mmc_host *mmc)
2269 {
2270 struct sdhci_host *host = mmc_priv(mmc);
2271 int gpio_cd = mmc_gpio_get_cd(mmc);
2272
2273 if (host->flags & SDHCI_DEVICE_DEAD)
2274 return 0;
2275
2276 /* If nonremovable, assume that the card is always present. */
2277 if (!mmc_card_is_removable(host->mmc))
2278 return 1;
2279
2280 /*
2281 * Try slot gpio detect; if defined, it takes precedence
2282 * over the built-in controller functionality.
2283 */
2284 if (gpio_cd >= 0)
2285 return !!gpio_cd;
2286
2287 /* If polling, assume that the card is always present. */
2288 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2289 return 1;
2290
2291 /* Host native card detect */
2292 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2293 }
2294
2295 static int sdhci_check_ro(struct sdhci_host *host)
2296 {
2297 unsigned long flags;
2298 int is_readonly;
2299
2300 spin_lock_irqsave(&host->lock, flags);
2301
2302 if (host->flags & SDHCI_DEVICE_DEAD)
2303 is_readonly = 0;
2304 else if (host->ops->get_ro)
2305 is_readonly = host->ops->get_ro(host);
2306 else if (mmc_can_gpio_ro(host->mmc))
2307 is_readonly = mmc_gpio_get_ro(host->mmc);
2308 else
2309 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2310 & SDHCI_WRITE_PROTECT);
2311
2312 spin_unlock_irqrestore(&host->lock, flags);
2313
2314 /* This quirk needs to be replaced by a callback-function later */
2315 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2316 !is_readonly : is_readonly; 2317 } 2318 2319 #define SAMPLE_COUNT 5 2320 2321 static int sdhci_get_ro(struct mmc_host *mmc) 2322 { 2323 struct sdhci_host *host = mmc_priv(mmc); 2324 int i, ro_count; 2325 2326 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) 2327 return sdhci_check_ro(host); 2328 2329 ro_count = 0; 2330 for (i = 0; i < SAMPLE_COUNT; i++) { 2331 if (sdhci_check_ro(host)) { 2332 if (++ro_count > SAMPLE_COUNT / 2) 2333 return 1; 2334 } 2335 msleep(30); 2336 } 2337 return 0; 2338 } 2339 2340 static void sdhci_hw_reset(struct mmc_host *mmc) 2341 { 2342 struct sdhci_host *host = mmc_priv(mmc); 2343 2344 if (host->ops && host->ops->hw_reset) 2345 host->ops->hw_reset(host); 2346 } 2347 2348 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 2349 { 2350 if (!(host->flags & SDHCI_DEVICE_DEAD)) { 2351 if (enable) 2352 host->ier |= SDHCI_INT_CARD_INT; 2353 else 2354 host->ier &= ~SDHCI_INT_CARD_INT; 2355 2356 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2357 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2358 } 2359 } 2360 2361 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 2362 { 2363 struct sdhci_host *host = mmc_priv(mmc); 2364 unsigned long flags; 2365 2366 if (enable) 2367 pm_runtime_get_noresume(host->mmc->parent); 2368 2369 spin_lock_irqsave(&host->lock, flags); 2370 sdhci_enable_sdio_irq_nolock(host, enable); 2371 spin_unlock_irqrestore(&host->lock, flags); 2372 2373 if (!enable) 2374 pm_runtime_put_noidle(host->mmc->parent); 2375 } 2376 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2377 2378 static void sdhci_ack_sdio_irq(struct mmc_host *mmc) 2379 { 2380 struct sdhci_host *host = mmc_priv(mmc); 2381 unsigned long flags; 2382 2383 spin_lock_irqsave(&host->lock, flags); 2384 sdhci_enable_sdio_irq_nolock(host, true); 2385 spin_unlock_irqrestore(&host->lock, flags); 2386 } 2387 2388 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 2389 struct mmc_ios *ios) 2390 { 2391 struct sdhci_host *host = mmc_priv(mmc); 2392 u16 ctrl; 2393 int ret; 2394 2395 /* 2396 * Signal Voltage Switching is only applicable for Host Controllers 2397 * v3.00 and above. 
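 *
 * Rough shape of the switch (summarising the code below): program the 1.8V
 * Signal Enable bit in HOST_CONTROL2 (and vqmmc, when a regulator is
 * available), give the supply ~5 ms to settle, then read the bit back; if
 * the controller has reverted it, the switch is reported as failed with
 * -EAGAIN.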
2398 */
2399 if (host->version < SDHCI_SPEC_300)
2400 return 0;
2401
2402 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2403
2404 switch (ios->signal_voltage) {
2405 case MMC_SIGNAL_VOLTAGE_330:
2406 if (!(host->flags & SDHCI_SIGNALING_330))
2407 return -EINVAL;
2408 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2409 ctrl &= ~SDHCI_CTRL_VDD_180;
2410 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2411
2412 if (!IS_ERR(mmc->supply.vqmmc)) {
2413 ret = mmc_regulator_set_vqmmc(mmc, ios);
2414 if (ret) {
2415 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2416 mmc_hostname(mmc));
2417 return -EIO;
2418 }
2419 }
2420 /* Wait for 5ms */
2421 usleep_range(5000, 5500);
2422
2423 /* 3.3V regulator output should be stable within 5 ms */
2424 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2425 if (!(ctrl & SDHCI_CTRL_VDD_180))
2426 return 0;
2427
2428 pr_warn("%s: 3.3V regulator output did not become stable\n",
2429 mmc_hostname(mmc));
2430
2431 return -EAGAIN;
2432 case MMC_SIGNAL_VOLTAGE_180:
2433 if (!(host->flags & SDHCI_SIGNALING_180))
2434 return -EINVAL;
2435 if (!IS_ERR(mmc->supply.vqmmc)) {
2436 ret = mmc_regulator_set_vqmmc(mmc, ios);
2437 if (ret) {
2438 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2439 mmc_hostname(mmc));
2440 return -EIO;
2441 }
2442 }
2443
2444 /*
2445 * Enable 1.8V Signal Enable in the Host Control2
2446 * register
2447 */
2448 ctrl |= SDHCI_CTRL_VDD_180;
2449 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2450
2451 /* Some controllers need to do more when switching */
2452 if (host->ops->voltage_switch)
2453 host->ops->voltage_switch(host);
2454
2455 /* 1.8V regulator output should be stable within 5 ms */
2456 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2457 if (ctrl & SDHCI_CTRL_VDD_180)
2458 return 0;
2459
2460 pr_warn("%s: 1.8V regulator output did not become stable\n",
2461 mmc_hostname(mmc));
2462
2463 return -EAGAIN;
2464 case MMC_SIGNAL_VOLTAGE_120:
2465 if (!(host->flags & SDHCI_SIGNALING_120))
2466 return -EINVAL;
2467 if (!IS_ERR(mmc->supply.vqmmc)) {
2468 ret = mmc_regulator_set_vqmmc(mmc, ios);
2469 if (ret) {
2470 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2471 mmc_hostname(mmc));
2472 return -EIO;
2473 }
2474 }
2475 return 0;
2476 default:
2477 /* No signal voltage switch required */
2478 return 0;
2479 }
2480 }
2481 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2482
2483 static int sdhci_card_busy(struct mmc_host *mmc)
2484 {
2485 struct sdhci_host *host = mmc_priv(mmc);
2486 u32 present_state;
2487
2488 /* Check whether DAT[0] is 0 */
2489 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2490
2491 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2492 }
2493
2494 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2495 {
2496 struct sdhci_host *host = mmc_priv(mmc);
2497 unsigned long flags;
2498
2499 spin_lock_irqsave(&host->lock, flags);
2500 host->flags |= SDHCI_HS400_TUNING;
2501 spin_unlock_irqrestore(&host->lock, flags);
2502
2503 return 0;
2504 }
2505
2506 void sdhci_start_tuning(struct sdhci_host *host)
2507 {
2508 u16 ctrl;
2509
2510 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2511 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2512 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2513 ctrl |= SDHCI_CTRL_TUNED_CLK;
2514 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2515
2516 /*
2517 * As per the Host Controller spec v3.00, tuning command
2518 * generates Buffer Read Ready interrupt, so enable that.
2519 * 2520 * Note: The spec clearly says that when tuning sequence 2521 * is being performed, the controller does not generate 2522 * interrupts other than Buffer Read Ready interrupt. But 2523 * to make sure we don't hit a controller bug, we _only_ 2524 * enable Buffer Read Ready interrupt here. 2525 */ 2526 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 2527 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 2528 } 2529 EXPORT_SYMBOL_GPL(sdhci_start_tuning); 2530 2531 void sdhci_end_tuning(struct sdhci_host *host) 2532 { 2533 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2534 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2535 } 2536 EXPORT_SYMBOL_GPL(sdhci_end_tuning); 2537 2538 void sdhci_reset_tuning(struct sdhci_host *host) 2539 { 2540 u16 ctrl; 2541 2542 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2543 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2544 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2545 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2546 } 2547 EXPORT_SYMBOL_GPL(sdhci_reset_tuning); 2548 2549 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) 2550 { 2551 sdhci_reset_tuning(host); 2552 2553 sdhci_do_reset(host, SDHCI_RESET_CMD); 2554 sdhci_do_reset(host, SDHCI_RESET_DATA); 2555 2556 sdhci_end_tuning(host); 2557 2558 mmc_abort_tuning(host->mmc, opcode); 2559 } 2560 EXPORT_SYMBOL_GPL(sdhci_abort_tuning); 2561 2562 /* 2563 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI 2564 * tuning command does not have a data payload (or rather the hardware does it 2565 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command 2566 * interrupt setup is different to other commands and there is no timeout 2567 * interrupt so special handling is needed. 2568 */ 2569 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) 2570 { 2571 struct mmc_host *mmc = host->mmc; 2572 struct mmc_command cmd = {}; 2573 struct mmc_request mrq = {}; 2574 unsigned long flags; 2575 u32 b = host->sdma_boundary; 2576 2577 spin_lock_irqsave(&host->lock, flags); 2578 2579 cmd.opcode = opcode; 2580 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2581 cmd.mrq = &mrq; 2582 2583 mrq.cmd = &cmd; 2584 /* 2585 * In response to CMD19, the card sends 64 bytes of tuning 2586 * block to the Host Controller. So we set the block size 2587 * to 64 here. 2588 */ 2589 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && 2590 mmc->ios.bus_width == MMC_BUS_WIDTH_8) 2591 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); 2592 else 2593 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); 2594 2595 /* 2596 * The tuning block is sent by the card to the host controller. 2597 * So we set the TRNS_READ bit in the Transfer Mode register. 2598 * This also takes care of setting DMA Enable and Multi Block 2599 * Select in the same register to 0. 2600 */ 2601 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 2602 2603 sdhci_send_command(host, &cmd); 2604 2605 host->cmd = NULL; 2606 2607 sdhci_del_timer(host, &mrq); 2608 2609 host->tuning_done = 0; 2610 2611 spin_unlock_irqrestore(&host->lock, flags); 2612 2613 /* Wait for Buffer Read Ready interrupt */ 2614 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2615 msecs_to_jiffies(50)); 2616 2617 } 2618 EXPORT_SYMBOL_GPL(sdhci_send_tuning); 2619 2620 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2621 { 2622 int i; 2623 2624 /* 2625 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number 2626 * of loops reaches tuning loop count. 
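 *
 * In short (summarising the loop below): if Execute Tuning clears with
 * Sampling Clock Select (SDHCI_CTRL_TUNED_CLK) set, tuning succeeded; if
 * it clears without it, or the loop count is exhausted, we fall back to
 * the fixed sampling clock (-EAGAIN); a missing Buffer Read Ready within
 * the 50 ms wait aborts tuning altogether (-ETIMEDOUT).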
2627 */ 2628 for (i = 0; i < host->tuning_loop_count; i++) { 2629 u16 ctrl; 2630 2631 sdhci_send_tuning(host, opcode); 2632 2633 if (!host->tuning_done) { 2634 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", 2635 mmc_hostname(host->mmc)); 2636 sdhci_abort_tuning(host, opcode); 2637 return -ETIMEDOUT; 2638 } 2639 2640 /* Spec does not require a delay between tuning cycles */ 2641 if (host->tuning_delay > 0) 2642 mdelay(host->tuning_delay); 2643 2644 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2645 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { 2646 if (ctrl & SDHCI_CTRL_TUNED_CLK) 2647 return 0; /* Success! */ 2648 break; 2649 } 2650 2651 } 2652 2653 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2654 mmc_hostname(host->mmc)); 2655 sdhci_reset_tuning(host); 2656 return -EAGAIN; 2657 } 2658 2659 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2660 { 2661 struct sdhci_host *host = mmc_priv(mmc); 2662 int err = 0; 2663 unsigned int tuning_count = 0; 2664 bool hs400_tuning; 2665 2666 hs400_tuning = host->flags & SDHCI_HS400_TUNING; 2667 2668 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 2669 tuning_count = host->tuning_count; 2670 2671 /* 2672 * The Host Controller needs tuning in case of SDR104 and DDR50 2673 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in 2674 * the Capabilities register. 2675 * If the Host Controller supports the HS200 mode then the 2676 * tuning function has to be executed. 2677 */ 2678 switch (host->timing) { 2679 /* HS400 tuning is done in HS200 mode */ 2680 case MMC_TIMING_MMC_HS400: 2681 err = -EINVAL; 2682 goto out; 2683 2684 case MMC_TIMING_MMC_HS200: 2685 /* 2686 * Periodic re-tuning for HS400 is not expected to be needed, so 2687 * disable it here. 2688 */ 2689 if (hs400_tuning) 2690 tuning_count = 0; 2691 break; 2692 2693 case MMC_TIMING_UHS_SDR104: 2694 case MMC_TIMING_UHS_DDR50: 2695 break; 2696 2697 case MMC_TIMING_UHS_SDR50: 2698 if (host->flags & SDHCI_SDR50_NEEDS_TUNING) 2699 break; 2700 /* FALLTHROUGH */ 2701 2702 default: 2703 goto out; 2704 } 2705 2706 if (host->ops->platform_execute_tuning) { 2707 err = host->ops->platform_execute_tuning(host, opcode); 2708 goto out; 2709 } 2710 2711 host->mmc->retune_period = tuning_count; 2712 2713 if (host->tuning_delay < 0) 2714 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; 2715 2716 sdhci_start_tuning(host); 2717 2718 host->tuning_err = __sdhci_execute_tuning(host, opcode); 2719 2720 sdhci_end_tuning(host); 2721 out: 2722 host->flags &= ~SDHCI_HS400_TUNING; 2723 2724 return err; 2725 } 2726 EXPORT_SYMBOL_GPL(sdhci_execute_tuning); 2727 2728 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2729 { 2730 /* Host Controller v3.00 defines preset value registers */ 2731 if (host->version < SDHCI_SPEC_300) 2732 return; 2733 2734 /* 2735 * We only enable or disable Preset Value if they are not already 2736 * enabled or disabled respectively. Otherwise, we bail out. 
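 * The SDHCI_PV_ENABLED flag mirrors the state written here so that, for
 * example, runtime resume can re-enable preset values later (see
 * sdhci_runtime_resume_host()).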
2737 */ 2738 if (host->preset_enabled != enable) { 2739 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2740 2741 if (enable) 2742 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2743 else 2744 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2745 2746 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2747 2748 if (enable) 2749 host->flags |= SDHCI_PV_ENABLED; 2750 else 2751 host->flags &= ~SDHCI_PV_ENABLED; 2752 2753 host->preset_enabled = enable; 2754 } 2755 } 2756 2757 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 2758 int err) 2759 { 2760 struct sdhci_host *host = mmc_priv(mmc); 2761 struct mmc_data *data = mrq->data; 2762 2763 if (data->host_cookie != COOKIE_UNMAPPED) 2764 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 2765 mmc_get_dma_dir(data)); 2766 2767 data->host_cookie = COOKIE_UNMAPPED; 2768 } 2769 2770 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 2771 { 2772 struct sdhci_host *host = mmc_priv(mmc); 2773 2774 mrq->data->host_cookie = COOKIE_UNMAPPED; 2775 2776 /* 2777 * No pre-mapping in the pre hook if we're using the bounce buffer, 2778 * for that we would need two bounce buffers since one buffer is 2779 * in flight when this is getting called. 2780 */ 2781 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) 2782 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 2783 } 2784 2785 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) 2786 { 2787 if (host->data_cmd) { 2788 host->data_cmd->error = err; 2789 sdhci_finish_mrq(host, host->data_cmd->mrq); 2790 } 2791 2792 if (host->cmd) { 2793 host->cmd->error = err; 2794 sdhci_finish_mrq(host, host->cmd->mrq); 2795 } 2796 } 2797 2798 static void sdhci_card_event(struct mmc_host *mmc) 2799 { 2800 struct sdhci_host *host = mmc_priv(mmc); 2801 unsigned long flags; 2802 int present; 2803 2804 /* First check if client has provided their own card event */ 2805 if (host->ops->card_event) 2806 host->ops->card_event(host); 2807 2808 present = mmc->ops->get_cd(mmc); 2809 2810 spin_lock_irqsave(&host->lock, flags); 2811 2812 /* Check sdhci_has_requests() first in case we are runtime suspended */ 2813 if (sdhci_has_requests(host) && !present) { 2814 pr_err("%s: Card removed during transfer!\n", 2815 mmc_hostname(host->mmc)); 2816 pr_err("%s: Resetting controller.\n", 2817 mmc_hostname(host->mmc)); 2818 2819 sdhci_do_reset(host, SDHCI_RESET_CMD); 2820 sdhci_do_reset(host, SDHCI_RESET_DATA); 2821 2822 sdhci_error_out_mrqs(host, -ENOMEDIUM); 2823 } 2824 2825 spin_unlock_irqrestore(&host->lock, flags); 2826 } 2827 2828 static const struct mmc_host_ops sdhci_ops = { 2829 .request = sdhci_request, 2830 .post_req = sdhci_post_req, 2831 .pre_req = sdhci_pre_req, 2832 .set_ios = sdhci_set_ios, 2833 .get_cd = sdhci_get_cd, 2834 .get_ro = sdhci_get_ro, 2835 .hw_reset = sdhci_hw_reset, 2836 .enable_sdio_irq = sdhci_enable_sdio_irq, 2837 .ack_sdio_irq = sdhci_ack_sdio_irq, 2838 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 2839 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 2840 .execute_tuning = sdhci_execute_tuning, 2841 .card_event = sdhci_card_event, 2842 .card_busy = sdhci_card_busy, 2843 }; 2844 2845 /*****************************************************************************\ 2846 * * 2847 * Request done * 2848 * * 2849 \*****************************************************************************/ 2850 2851 static bool sdhci_request_done(struct sdhci_host *host) 2852 { 2853 unsigned long flags; 2854 struct mmc_request *mrq; 2855 int i; 2856 2857 
spin_lock_irqsave(&host->lock, flags); 2858 2859 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 2860 mrq = host->mrqs_done[i]; 2861 if (mrq) 2862 break; 2863 } 2864 2865 if (!mrq) { 2866 spin_unlock_irqrestore(&host->lock, flags); 2867 return true; 2868 } 2869 2870 /* 2871 * Always unmap the data buffers if they were mapped by 2872 * sdhci_prepare_data() whenever we finish with a request. 2873 * This avoids leaking DMA mappings on error. 2874 */ 2875 if (host->flags & SDHCI_REQ_USE_DMA) { 2876 struct mmc_data *data = mrq->data; 2877 2878 if (host->use_external_dma && data && 2879 (mrq->cmd->error || data->error)) { 2880 struct dma_chan *chan = sdhci_external_dma_channel(host, data); 2881 2882 host->mrqs_done[i] = NULL; 2883 spin_unlock_irqrestore(&host->lock, flags); 2884 dmaengine_terminate_sync(chan); 2885 spin_lock_irqsave(&host->lock, flags); 2886 sdhci_set_mrq_done(host, mrq); 2887 } 2888 2889 if (data && data->host_cookie == COOKIE_MAPPED) { 2890 if (host->bounce_buffer) { 2891 /* 2892 * On reads, copy the bounced data into the 2893 * sglist 2894 */ 2895 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 2896 unsigned int length = data->bytes_xfered; 2897 2898 if (length > host->bounce_buffer_size) { 2899 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 2900 mmc_hostname(host->mmc), 2901 host->bounce_buffer_size, 2902 data->bytes_xfered); 2903 /* Cap it down and continue */ 2904 length = host->bounce_buffer_size; 2905 } 2906 dma_sync_single_for_cpu( 2907 host->mmc->parent, 2908 host->bounce_addr, 2909 host->bounce_buffer_size, 2910 DMA_FROM_DEVICE); 2911 sg_copy_from_buffer(data->sg, 2912 data->sg_len, 2913 host->bounce_buffer, 2914 length); 2915 } else { 2916 /* No copying, just switch ownership */ 2917 dma_sync_single_for_cpu( 2918 host->mmc->parent, 2919 host->bounce_addr, 2920 host->bounce_buffer_size, 2921 mmc_get_dma_dir(data)); 2922 } 2923 } else { 2924 /* Unmap the raw data */ 2925 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 2926 data->sg_len, 2927 mmc_get_dma_dir(data)); 2928 } 2929 data->host_cookie = COOKIE_UNMAPPED; 2930 } 2931 } 2932 2933 /* 2934 * The controller needs a reset of internal state machines 2935 * upon error conditions. 2936 */ 2937 if (sdhci_needs_reset(host, mrq)) { 2938 /* 2939 * Do not finish until command and data lines are available for 2940 * reset. Note there can only be one other mrq, so it cannot 2941 * also be in mrqs_done, otherwise host->cmd and host->data_cmd 2942 * would both be null. 2943 */ 2944 if (host->cmd || host->data_cmd) { 2945 spin_unlock_irqrestore(&host->lock, flags); 2946 return true; 2947 } 2948 2949 /* Some controllers need this kick or reset won't work here */ 2950 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 2951 /* This is to force an update */ 2952 host->ops->set_clock(host, host->clock); 2953 2954 /* Spec says we should do both at the same time, but Ricoh 2955 controllers do not like that. 
*/ 2956 sdhci_do_reset(host, SDHCI_RESET_CMD); 2957 sdhci_do_reset(host, SDHCI_RESET_DATA); 2958 2959 host->pending_reset = false; 2960 } 2961 2962 host->mrqs_done[i] = NULL; 2963 2964 spin_unlock_irqrestore(&host->lock, flags); 2965 2966 if (host->ops->request_done) 2967 host->ops->request_done(host, mrq); 2968 else 2969 mmc_request_done(host->mmc, mrq); 2970 2971 return false; 2972 } 2973 2974 static void sdhci_complete_work(struct work_struct *work) 2975 { 2976 struct sdhci_host *host = container_of(work, struct sdhci_host, 2977 complete_work); 2978 2979 while (!sdhci_request_done(host)) 2980 ; 2981 } 2982 2983 static void sdhci_timeout_timer(struct timer_list *t) 2984 { 2985 struct sdhci_host *host; 2986 unsigned long flags; 2987 2988 host = from_timer(host, t, timer); 2989 2990 spin_lock_irqsave(&host->lock, flags); 2991 2992 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { 2993 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", 2994 mmc_hostname(host->mmc)); 2995 sdhci_dumpregs(host); 2996 2997 host->cmd->error = -ETIMEDOUT; 2998 sdhci_finish_mrq(host, host->cmd->mrq); 2999 } 3000 3001 spin_unlock_irqrestore(&host->lock, flags); 3002 } 3003 3004 static void sdhci_timeout_data_timer(struct timer_list *t) 3005 { 3006 struct sdhci_host *host; 3007 unsigned long flags; 3008 3009 host = from_timer(host, t, data_timer); 3010 3011 spin_lock_irqsave(&host->lock, flags); 3012 3013 if (host->data || host->data_cmd || 3014 (host->cmd && sdhci_data_line_cmd(host->cmd))) { 3015 pr_err("%s: Timeout waiting for hardware interrupt.\n", 3016 mmc_hostname(host->mmc)); 3017 sdhci_dumpregs(host); 3018 3019 if (host->data) { 3020 host->data->error = -ETIMEDOUT; 3021 sdhci_finish_data(host); 3022 queue_work(host->complete_wq, &host->complete_work); 3023 } else if (host->data_cmd) { 3024 host->data_cmd->error = -ETIMEDOUT; 3025 sdhci_finish_mrq(host, host->data_cmd->mrq); 3026 } else { 3027 host->cmd->error = -ETIMEDOUT; 3028 sdhci_finish_mrq(host, host->cmd->mrq); 3029 } 3030 } 3031 3032 spin_unlock_irqrestore(&host->lock, flags); 3033 } 3034 3035 /*****************************************************************************\ 3036 * * 3037 * Interrupt handling * 3038 * * 3039 \*****************************************************************************/ 3040 3041 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) 3042 { 3043 /* Handle auto-CMD12 error */ 3044 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { 3045 struct mmc_request *mrq = host->data_cmd->mrq; 3046 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3047 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 3048 SDHCI_INT_DATA_TIMEOUT : 3049 SDHCI_INT_DATA_CRC; 3050 3051 /* Treat auto-CMD12 error the same as data error */ 3052 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { 3053 *intmask_p |= data_err_bit; 3054 return; 3055 } 3056 } 3057 3058 if (!host->cmd) { 3059 /* 3060 * SDHCI recovers from errors by resetting the cmd and data 3061 * circuits. Until that is done, there very well might be more 3062 * interrupts, so ignore them in that case. 
3063 */ 3064 if (host->pending_reset) 3065 return; 3066 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 3067 mmc_hostname(host->mmc), (unsigned)intmask); 3068 sdhci_dumpregs(host); 3069 return; 3070 } 3071 3072 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 3073 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 3074 if (intmask & SDHCI_INT_TIMEOUT) 3075 host->cmd->error = -ETIMEDOUT; 3076 else 3077 host->cmd->error = -EILSEQ; 3078 3079 /* Treat data command CRC error the same as data CRC error */ 3080 if (host->cmd->data && 3081 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 3082 SDHCI_INT_CRC) { 3083 host->cmd = NULL; 3084 *intmask_p |= SDHCI_INT_DATA_CRC; 3085 return; 3086 } 3087 3088 __sdhci_finish_mrq(host, host->cmd->mrq); 3089 return; 3090 } 3091 3092 /* Handle auto-CMD23 error */ 3093 if (intmask & SDHCI_INT_AUTO_CMD_ERR) { 3094 struct mmc_request *mrq = host->cmd->mrq; 3095 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3096 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 3097 -ETIMEDOUT : 3098 -EILSEQ; 3099 3100 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { 3101 mrq->sbc->error = err; 3102 __sdhci_finish_mrq(host, mrq); 3103 return; 3104 } 3105 } 3106 3107 if (intmask & SDHCI_INT_RESPONSE) 3108 sdhci_finish_command(host); 3109 } 3110 3111 static void sdhci_adma_show_error(struct sdhci_host *host) 3112 { 3113 void *desc = host->adma_table; 3114 dma_addr_t dma = host->adma_addr; 3115 3116 sdhci_dumpregs(host); 3117 3118 while (true) { 3119 struct sdhci_adma2_64_desc *dma_desc = desc; 3120 3121 if (host->flags & SDHCI_USE_64_BIT_DMA) 3122 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 3123 (unsigned long long)dma, 3124 le32_to_cpu(dma_desc->addr_hi), 3125 le32_to_cpu(dma_desc->addr_lo), 3126 le16_to_cpu(dma_desc->len), 3127 le16_to_cpu(dma_desc->cmd)); 3128 else 3129 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 3130 (unsigned long long)dma, 3131 le32_to_cpu(dma_desc->addr_lo), 3132 le16_to_cpu(dma_desc->len), 3133 le16_to_cpu(dma_desc->cmd)); 3134 3135 desc += host->desc_sz; 3136 dma += host->desc_sz; 3137 3138 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 3139 break; 3140 } 3141 } 3142 3143 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 3144 { 3145 u32 command; 3146 3147 /* CMD19 generates _only_ Buffer Read Ready interrupt */ 3148 if (intmask & SDHCI_INT_DATA_AVAIL) { 3149 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); 3150 if (command == MMC_SEND_TUNING_BLOCK || 3151 command == MMC_SEND_TUNING_BLOCK_HS200) { 3152 host->tuning_done = 1; 3153 wake_up(&host->buf_ready_int); 3154 return; 3155 } 3156 } 3157 3158 if (!host->data) { 3159 struct mmc_command *data_cmd = host->data_cmd; 3160 3161 /* 3162 * The "data complete" interrupt is also used to 3163 * indicate that a busy state has ended. See comment 3164 * above in sdhci_cmd_irq(). 3165 */ 3166 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 3167 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3168 host->data_cmd = NULL; 3169 data_cmd->error = -ETIMEDOUT; 3170 __sdhci_finish_mrq(host, data_cmd->mrq); 3171 return; 3172 } 3173 if (intmask & SDHCI_INT_DATA_END) { 3174 host->data_cmd = NULL; 3175 /* 3176 * Some cards handle busy-end interrupt 3177 * before the command completed, so make 3178 * sure we do things in the proper order. 
3179 */ 3180 if (host->cmd == data_cmd) 3181 return; 3182 3183 __sdhci_finish_mrq(host, data_cmd->mrq); 3184 return; 3185 } 3186 } 3187 3188 /* 3189 * SDHCI recovers from errors by resetting the cmd and data 3190 * circuits. Until that is done, there very well might be more 3191 * interrupts, so ignore them in that case. 3192 */ 3193 if (host->pending_reset) 3194 return; 3195 3196 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 3197 mmc_hostname(host->mmc), (unsigned)intmask); 3198 sdhci_dumpregs(host); 3199 3200 return; 3201 } 3202 3203 if (intmask & SDHCI_INT_DATA_TIMEOUT) 3204 host->data->error = -ETIMEDOUT; 3205 else if (intmask & SDHCI_INT_DATA_END_BIT) 3206 host->data->error = -EILSEQ; 3207 else if ((intmask & SDHCI_INT_DATA_CRC) && 3208 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 3209 != MMC_BUS_TEST_R) 3210 host->data->error = -EILSEQ; 3211 else if (intmask & SDHCI_INT_ADMA_ERROR) { 3212 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), 3213 intmask); 3214 sdhci_adma_show_error(host); 3215 host->data->error = -EIO; 3216 if (host->ops->adma_workaround) 3217 host->ops->adma_workaround(host, intmask); 3218 } 3219 3220 if (host->data->error) 3221 sdhci_finish_data(host); 3222 else { 3223 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 3224 sdhci_transfer_pio(host); 3225 3226 /* 3227 * We currently don't do anything fancy with DMA 3228 * boundaries, but as we can't disable the feature 3229 * we need to at least restart the transfer. 3230 * 3231 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 3232 * should return a valid address to continue from, but as 3233 * some controllers are faulty, don't trust them. 3234 */ 3235 if (intmask & SDHCI_INT_DMA_END) { 3236 dma_addr_t dmastart, dmanow; 3237 3238 dmastart = sdhci_sdma_address(host); 3239 dmanow = dmastart + host->data->bytes_xfered; 3240 /* 3241 * Force update to the next DMA block boundary. 3242 */ 3243 dmanow = (dmanow & 3244 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 3245 SDHCI_DEFAULT_BOUNDARY_SIZE; 3246 host->data->bytes_xfered = dmanow - dmastart; 3247 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", 3248 &dmastart, host->data->bytes_xfered, &dmanow); 3249 sdhci_set_sdma_addr(host, dmanow); 3250 } 3251 3252 if (intmask & SDHCI_INT_DATA_END) { 3253 if (host->cmd == host->data_cmd) { 3254 /* 3255 * Data managed to finish before the 3256 * command completed. Make sure we do 3257 * things in the proper order. 
3258 */
3259 host->data_early = 1;
3260 } else {
3261 sdhci_finish_data(host);
3262 }
3263 }
3264 }
3265 }
3266
3267 static inline bool sdhci_defer_done(struct sdhci_host *host,
3268 struct mmc_request *mrq)
3269 {
3270 struct mmc_data *data = mrq->data;
3271
3272 return host->pending_reset || host->always_defer_done ||
3273 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3274 data->host_cookie == COOKIE_MAPPED);
3275 }
3276
3277 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3278 {
3279 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3280 irqreturn_t result = IRQ_NONE;
3281 struct sdhci_host *host = dev_id;
3282 u32 intmask, mask, unexpected = 0;
3283 int max_loops = 16;
3284 int i;
3285
3286 spin_lock(&host->lock);
3287
3288 if (host->runtime_suspended) {
3289 spin_unlock(&host->lock);
3290 return IRQ_NONE;
3291 }
3292
3293 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3294 if (!intmask || intmask == 0xffffffff) {
3295 result = IRQ_NONE;
3296 goto out;
3297 }
3298
3299 do {
3300 DBG("IRQ status 0x%08x\n", intmask);
3301
3302 if (host->ops->irq) {
3303 intmask = host->ops->irq(host, intmask);
3304 if (!intmask)
3305 goto cont;
3306 }
3307
3308 /* Clear selected interrupts. */
3309 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3310 SDHCI_INT_BUS_POWER);
3311 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3312
3313 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3314 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3315 SDHCI_CARD_PRESENT;
3316
3317 /*
3318 * There is an observation on i.mx esdhc: the INSERT
3319 * bit will be immediately set again when it gets
3320 * cleared, if a card is inserted. We have to mask
3321 * the irq to prevent an interrupt storm which would
3322 * freeze the system. The REMOVE bit behaves the
3323 * same way.
3324 *
3325 * More testing is needed here to ensure it works
3326 * for other platforms though.
3327 */
3328 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3329 SDHCI_INT_CARD_REMOVE);
3330 host->ier |= present ?
SDHCI_INT_CARD_REMOVE : 3331 SDHCI_INT_CARD_INSERT; 3332 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3333 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3334 3335 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 3336 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 3337 3338 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 3339 SDHCI_INT_CARD_REMOVE); 3340 result = IRQ_WAKE_THREAD; 3341 } 3342 3343 if (intmask & SDHCI_INT_CMD_MASK) 3344 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask); 3345 3346 if (intmask & SDHCI_INT_DATA_MASK) 3347 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 3348 3349 if (intmask & SDHCI_INT_BUS_POWER) 3350 pr_err("%s: Card is consuming too much power!\n", 3351 mmc_hostname(host->mmc)); 3352 3353 if (intmask & SDHCI_INT_RETUNE) 3354 mmc_retune_needed(host->mmc); 3355 3356 if ((intmask & SDHCI_INT_CARD_INT) && 3357 (host->ier & SDHCI_INT_CARD_INT)) { 3358 sdhci_enable_sdio_irq_nolock(host, false); 3359 sdio_signal_irq(host->mmc); 3360 } 3361 3362 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 3363 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3364 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 3365 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); 3366 3367 if (intmask) { 3368 unexpected |= intmask; 3369 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3370 } 3371 cont: 3372 if (result == IRQ_NONE) 3373 result = IRQ_HANDLED; 3374 3375 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3376 } while (intmask && --max_loops); 3377 3378 /* Determine if mrqs can be completed immediately */ 3379 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3380 struct mmc_request *mrq = host->mrqs_done[i]; 3381 3382 if (!mrq) 3383 continue; 3384 3385 if (sdhci_defer_done(host, mrq)) { 3386 result = IRQ_WAKE_THREAD; 3387 } else { 3388 mrqs_done[i] = mrq; 3389 host->mrqs_done[i] = NULL; 3390 } 3391 } 3392 out: 3393 spin_unlock(&host->lock); 3394 3395 /* Process mrqs ready for immediate completion */ 3396 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3397 if (!mrqs_done[i]) 3398 continue; 3399 3400 if (host->ops->request_done) 3401 host->ops->request_done(host, mrqs_done[i]); 3402 else 3403 mmc_request_done(host->mmc, mrqs_done[i]); 3404 } 3405 3406 if (unexpected) { 3407 pr_err("%s: Unexpected interrupt 0x%08x.\n", 3408 mmc_hostname(host->mmc), unexpected); 3409 sdhci_dumpregs(host); 3410 } 3411 3412 return result; 3413 } 3414 3415 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3416 { 3417 struct sdhci_host *host = dev_id; 3418 unsigned long flags; 3419 u32 isr; 3420 3421 while (!sdhci_request_done(host)) 3422 ; 3423 3424 spin_lock_irqsave(&host->lock, flags); 3425 isr = host->thread_isr; 3426 host->thread_isr = 0; 3427 spin_unlock_irqrestore(&host->lock, flags); 3428 3429 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3430 struct mmc_host *mmc = host->mmc; 3431 3432 mmc->ops->card_event(mmc); 3433 mmc_detect_change(mmc, msecs_to_jiffies(200)); 3434 } 3435 3436 return IRQ_HANDLED; 3437 } 3438 3439 /*****************************************************************************\ 3440 * * 3441 * Suspend/resume * 3442 * * 3443 \*****************************************************************************/ 3444 3445 #ifdef CONFIG_PM 3446 3447 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host) 3448 { 3449 return mmc_card_is_removable(host->mmc) && 3450 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3451 !mmc_can_gpio_cd(host->mmc); 3452 } 3453 3454 /* 3455 * To enable wakeup events, the corresponding events have to be enabled in 3456 * the Interrupt 
Status Enable register too. See 'Table 1-6: Wakeup Signal 3457 * Table' in the SD Host Controller Standard Specification. 3458 * It is useless to restore SDHCI_INT_ENABLE state in 3459 * sdhci_disable_irq_wakeups() since it will be set by 3460 * sdhci_enable_card_detection() or sdhci_init(). 3461 */ 3462 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) 3463 { 3464 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | 3465 SDHCI_WAKE_ON_INT; 3466 u32 irq_val = 0; 3467 u8 wake_val = 0; 3468 u8 val; 3469 3470 if (sdhci_cd_irq_can_wakeup(host)) { 3471 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; 3472 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; 3473 } 3474 3475 if (mmc_card_wake_sdio_irq(host->mmc)) { 3476 wake_val |= SDHCI_WAKE_ON_INT; 3477 irq_val |= SDHCI_INT_CARD_INT; 3478 } 3479 3480 if (!irq_val) 3481 return false; 3482 3483 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3484 val &= ~mask; 3485 val |= wake_val; 3486 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3487 3488 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); 3489 3490 host->irq_wake_enabled = !enable_irq_wake(host->irq); 3491 3492 return host->irq_wake_enabled; 3493 } 3494 3495 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 3496 { 3497 u8 val; 3498 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 3499 | SDHCI_WAKE_ON_INT; 3500 3501 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3502 val &= ~mask; 3503 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3504 3505 disable_irq_wake(host->irq); 3506 3507 host->irq_wake_enabled = false; 3508 } 3509 3510 int sdhci_suspend_host(struct sdhci_host *host) 3511 { 3512 sdhci_disable_card_detection(host); 3513 3514 mmc_retune_timer_stop(host->mmc); 3515 3516 if (!device_may_wakeup(mmc_dev(host->mmc)) || 3517 !sdhci_enable_irq_wakeups(host)) { 3518 host->ier = 0; 3519 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3520 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3521 free_irq(host->irq, host); 3522 } 3523 3524 return 0; 3525 } 3526 3527 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 3528 3529 int sdhci_resume_host(struct sdhci_host *host) 3530 { 3531 struct mmc_host *mmc = host->mmc; 3532 int ret = 0; 3533 3534 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3535 if (host->ops->enable_dma) 3536 host->ops->enable_dma(host); 3537 } 3538 3539 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && 3540 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 3541 /* Card keeps power but host controller does not */ 3542 sdhci_init(host, 0); 3543 host->pwr = 0; 3544 host->clock = 0; 3545 mmc->ops->set_ios(mmc, &mmc->ios); 3546 } else { 3547 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); 3548 } 3549 3550 if (host->irq_wake_enabled) { 3551 sdhci_disable_irq_wakeups(host); 3552 } else { 3553 ret = request_threaded_irq(host->irq, sdhci_irq, 3554 sdhci_thread_irq, IRQF_SHARED, 3555 mmc_hostname(host->mmc), host); 3556 if (ret) 3557 return ret; 3558 } 3559 3560 sdhci_enable_card_detection(host); 3561 3562 return ret; 3563 } 3564 3565 EXPORT_SYMBOL_GPL(sdhci_resume_host); 3566 3567 int sdhci_runtime_suspend_host(struct sdhci_host *host) 3568 { 3569 unsigned long flags; 3570 3571 mmc_retune_timer_stop(host->mmc); 3572 3573 spin_lock_irqsave(&host->lock, flags); 3574 host->ier &= SDHCI_INT_CARD_INT; 3575 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3576 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3577 spin_unlock_irqrestore(&host->lock, flags); 3578 3579 synchronize_hardirq(host->irq); 3580 3581 spin_lock_irqsave(&host->lock, flags); 3582 
host->runtime_suspended = true; 3583 spin_unlock_irqrestore(&host->lock, flags); 3584 3585 return 0; 3586 } 3587 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 3588 3589 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) 3590 { 3591 struct mmc_host *mmc = host->mmc; 3592 unsigned long flags; 3593 int host_flags = host->flags; 3594 3595 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3596 if (host->ops->enable_dma) 3597 host->ops->enable_dma(host); 3598 } 3599 3600 sdhci_init(host, soft_reset); 3601 3602 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && 3603 mmc->ios.power_mode != MMC_POWER_OFF) { 3604 /* Force clock and power re-program */ 3605 host->pwr = 0; 3606 host->clock = 0; 3607 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); 3608 mmc->ops->set_ios(mmc, &mmc->ios); 3609 3610 if ((host_flags & SDHCI_PV_ENABLED) && 3611 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { 3612 spin_lock_irqsave(&host->lock, flags); 3613 sdhci_enable_preset_value(host, true); 3614 spin_unlock_irqrestore(&host->lock, flags); 3615 } 3616 3617 if ((mmc->caps2 & MMC_CAP2_HS400_ES) && 3618 mmc->ops->hs400_enhanced_strobe) 3619 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); 3620 } 3621 3622 spin_lock_irqsave(&host->lock, flags); 3623 3624 host->runtime_suspended = false; 3625 3626 /* Enable SDIO IRQ */ 3627 if (sdio_irq_claimed(mmc)) 3628 sdhci_enable_sdio_irq_nolock(host, true); 3629 3630 /* Enable Card Detection */ 3631 sdhci_enable_card_detection(host); 3632 3633 spin_unlock_irqrestore(&host->lock, flags); 3634 3635 return 0; 3636 } 3637 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 3638 3639 #endif /* CONFIG_PM */ 3640 3641 /*****************************************************************************\ 3642 * * 3643 * Command Queue Engine (CQE) helpers * 3644 * * 3645 \*****************************************************************************/ 3646 3647 void sdhci_cqe_enable(struct mmc_host *mmc) 3648 { 3649 struct sdhci_host *host = mmc_priv(mmc); 3650 unsigned long flags; 3651 u8 ctrl; 3652 3653 spin_lock_irqsave(&host->lock, flags); 3654 3655 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 3656 ctrl &= ~SDHCI_CTRL_DMA_MASK; 3657 /* 3658 * Host from V4.10 supports ADMA3 DMA type. 3659 * ADMA3 performs integrated descriptor which is more suitable 3660 * for cmd queuing to fetch both command and transfer descriptors. 
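 * (Roughly speaking, an ADMA3 integrated descriptor pairs command
 * descriptors with an ADMA2 descriptor pointer, so a queued task's command
 * and data programming can be fetched in one pass.)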
3661 */ 3662 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) 3663 ctrl |= SDHCI_CTRL_ADMA3; 3664 else if (host->flags & SDHCI_USE_64_BIT_DMA) 3665 ctrl |= SDHCI_CTRL_ADMA64; 3666 else 3667 ctrl |= SDHCI_CTRL_ADMA32; 3668 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 3669 3670 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), 3671 SDHCI_BLOCK_SIZE); 3672 3673 /* Set maximum timeout */ 3674 sdhci_set_timeout(host, NULL); 3675 3676 host->ier = host->cqe_ier; 3677 3678 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3679 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3680 3681 host->cqe_on = true; 3682 3683 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", 3684 mmc_hostname(mmc), host->ier, 3685 sdhci_readl(host, SDHCI_INT_STATUS)); 3686 3687 spin_unlock_irqrestore(&host->lock, flags); 3688 } 3689 EXPORT_SYMBOL_GPL(sdhci_cqe_enable); 3690 3691 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) 3692 { 3693 struct sdhci_host *host = mmc_priv(mmc); 3694 unsigned long flags; 3695 3696 spin_lock_irqsave(&host->lock, flags); 3697 3698 sdhci_set_default_irqs(host); 3699 3700 host->cqe_on = false; 3701 3702 if (recovery) { 3703 sdhci_do_reset(host, SDHCI_RESET_CMD); 3704 sdhci_do_reset(host, SDHCI_RESET_DATA); 3705 } 3706 3707 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", 3708 mmc_hostname(mmc), host->ier, 3709 sdhci_readl(host, SDHCI_INT_STATUS)); 3710 3711 spin_unlock_irqrestore(&host->lock, flags); 3712 } 3713 EXPORT_SYMBOL_GPL(sdhci_cqe_disable); 3714 3715 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, 3716 int *data_error) 3717 { 3718 u32 mask; 3719 3720 if (!host->cqe_on) 3721 return false; 3722 3723 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) 3724 *cmd_error = -EILSEQ; 3725 else if (intmask & SDHCI_INT_TIMEOUT) 3726 *cmd_error = -ETIMEDOUT; 3727 else 3728 *cmd_error = 0; 3729 3730 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) 3731 *data_error = -EILSEQ; 3732 else if (intmask & SDHCI_INT_DATA_TIMEOUT) 3733 *data_error = -ETIMEDOUT; 3734 else if (intmask & SDHCI_INT_ADMA_ERROR) 3735 *data_error = -EIO; 3736 else 3737 *data_error = 0; 3738 3739 /* Clear selected interrupts. 
*/ 3740 mask = intmask & host->cqe_ier; 3741 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3742 3743 if (intmask & SDHCI_INT_BUS_POWER) 3744 pr_err("%s: Card is consuming too much power!\n", 3745 mmc_hostname(host->mmc)); 3746 3747 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR); 3748 if (intmask) { 3749 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3750 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n", 3751 mmc_hostname(host->mmc), intmask); 3752 sdhci_dumpregs(host); 3753 } 3754 3755 return true; 3756 } 3757 EXPORT_SYMBOL_GPL(sdhci_cqe_irq); 3758 3759 /*****************************************************************************\ 3760 * * 3761 * Device allocation/registration * 3762 * * 3763 \*****************************************************************************/ 3764 3765 struct sdhci_host *sdhci_alloc_host(struct device *dev, 3766 size_t priv_size) 3767 { 3768 struct mmc_host *mmc; 3769 struct sdhci_host *host; 3770 3771 WARN_ON(dev == NULL); 3772 3773 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 3774 if (!mmc) 3775 return ERR_PTR(-ENOMEM); 3776 3777 host = mmc_priv(mmc); 3778 host->mmc = mmc; 3779 host->mmc_host_ops = sdhci_ops; 3780 mmc->ops = &host->mmc_host_ops; 3781 3782 host->flags = SDHCI_SIGNALING_330; 3783 3784 host->cqe_ier = SDHCI_CQE_INT_MASK; 3785 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK; 3786 3787 host->tuning_delay = -1; 3788 host->tuning_loop_count = MAX_TUNING_LOOP; 3789 3790 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; 3791 3792 /* 3793 * The DMA table descriptor count is calculated as the maximum 3794 * number of segments times 2, to allow for an alignment 3795 * descriptor for each segment, plus 1 for a nop end descriptor. 3796 */ 3797 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; 3798 3799 return host; 3800 } 3801 3802 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 3803 3804 static int sdhci_set_dma_mask(struct sdhci_host *host) 3805 { 3806 struct mmc_host *mmc = host->mmc; 3807 struct device *dev = mmc_dev(mmc); 3808 int ret = -EINVAL; 3809 3810 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 3811 host->flags &= ~SDHCI_USE_64_BIT_DMA; 3812 3813 /* Try 64-bit mask if hardware is capable of it */ 3814 if (host->flags & SDHCI_USE_64_BIT_DMA) { 3815 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 3816 if (ret) { 3817 pr_warn("%s: Failed to set 64-bit DMA mask.\n", 3818 mmc_hostname(mmc)); 3819 host->flags &= ~SDHCI_USE_64_BIT_DMA; 3820 } 3821 } 3822 3823 /* 32-bit mask as default & fallback */ 3824 if (ret) { 3825 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 3826 if (ret) 3827 pr_warn("%s: Failed to set 32-bit DMA mask.\n", 3828 mmc_hostname(mmc)); 3829 } 3830 3831 return ret; 3832 } 3833 3834 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver, 3835 const u32 *caps, const u32 *caps1) 3836 { 3837 u16 v; 3838 u64 dt_caps_mask = 0; 3839 u64 dt_caps = 0; 3840 3841 if (host->read_caps) 3842 return; 3843 3844 host->read_caps = true; 3845 3846 if (debug_quirks) 3847 host->quirks = debug_quirks; 3848 3849 if (debug_quirks2) 3850 host->quirks2 = debug_quirks2; 3851 3852 sdhci_do_reset(host, SDHCI_RESET_ALL); 3853 3854 if (host->v4_mode) 3855 sdhci_do_enable_v4_mode(host); 3856 3857 of_property_read_u64(mmc_dev(host->mmc)->of_node, 3858 "sdhci-caps-mask", &dt_caps_mask); 3859 of_property_read_u64(mmc_dev(host->mmc)->of_node, 3860 "sdhci-caps", &dt_caps); 3861 3862 v = ver ? 
*ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3863 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3864
3865 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3866 return;
3867
3868 if (caps) {
3869 host->caps = *caps;
3870 } else {
3871 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3872 host->caps &= ~lower_32_bits(dt_caps_mask);
3873 host->caps |= lower_32_bits(dt_caps);
3874 }
3875
3876 if (host->version < SDHCI_SPEC_300)
3877 return;
3878
3879 if (caps1) {
3880 host->caps1 = *caps1;
3881 } else {
3882 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3883 host->caps1 &= ~upper_32_bits(dt_caps_mask);
3884 host->caps1 |= upper_32_bits(dt_caps);
3885 }
3886 }
3887 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
3888
3889 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3890 {
3891 struct mmc_host *mmc = host->mmc;
3892 unsigned int max_blocks;
3893 unsigned int bounce_size;
3894 int ret;
3895
3896 /*
3897 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3898 * has diminishing returns, probably because SD/MMC cards are
3899 * usually optimized to handle requests of this size.
3900 */
3901 bounce_size = SZ_64K;
3902 /*
3903 * Adjust downwards to maximum request size if this is less
3904 * than our segment size, else hammer down the maximum
3905 * request size to the maximum buffer size.
3906 */
3907 if (mmc->max_req_size < bounce_size)
3908 bounce_size = mmc->max_req_size;
3909 max_blocks = bounce_size / 512;
3910
3911 /*
3912 * When we support just one segment, we can get significant
3913 * speedups with the help of a bounce buffer that groups
3914 * scattered reads/writes together.
3915 */
3916 host->bounce_buffer = devm_kmalloc(mmc->parent,
3917 bounce_size,
3918 GFP_KERNEL);
3919 if (!host->bounce_buffer) {
3920 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3921 mmc_hostname(mmc),
3922 bounce_size);
3923 /*
3924 * Exiting with zero here makes sure we proceed with
3925 * mmc->max_segs == 1.
3926 */
3927 return;
3928 }
3929
3930 host->bounce_addr = dma_map_single(mmc->parent,
3931 host->bounce_buffer,
3932 bounce_size,
3933 DMA_BIDIRECTIONAL);
3934 ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3935 if (ret)
3936 /* Again fall back to max_segs == 1 */
3937 return;
3938 host->bounce_buffer_size = bounce_size;
3939
3940 /* Lie about this since we're bouncing */
3941 mmc->max_segs = max_blocks;
3942 mmc->max_seg_size = bounce_size;
3943 mmc->max_req_size = bounce_size;
3944
3945 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
3946 mmc_hostname(mmc), max_blocks, bounce_size);
3947 }
3948
3949 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
3950 {
3951 /*
3952 * According to SD Host Controller spec v4.10, bit[27] added from
3953 * version 4.10 in Capabilities Register is used as 64-bit System
3954 * Address support for V4 mode.
3955 */
3956 if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
3957 return host->caps & SDHCI_CAN_64BIT_V4;
3958
3959 return host->caps & SDHCI_CAN_64BIT;
3960 }
3961
3962 int sdhci_setup_host(struct sdhci_host *host)
3963 {
3964 struct mmc_host *mmc;
3965 u32 max_current_caps;
3966 unsigned int ocr_avail;
3967 unsigned int override_timeout_clk;
3968 u32 max_clk;
3969 int ret;
3970
3971 WARN_ON(host == NULL);
3972 if (host == NULL)
3973 return -EINVAL;
3974
3975 mmc = host->mmc;
3976
3977 /*
3978 * If there are external regulators, get them.
Note this must be done 3979 * early before resetting the host and reading the capabilities so that 3980 * the host can take the appropriate action if regulators are not 3981 * available. 3982 */ 3983 ret = mmc_regulator_get_supply(mmc); 3984 if (ret) 3985 return ret; 3986 3987 DBG("Version: 0x%08x | Present: 0x%08x\n", 3988 sdhci_readw(host, SDHCI_HOST_VERSION), 3989 sdhci_readl(host, SDHCI_PRESENT_STATE)); 3990 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", 3991 sdhci_readl(host, SDHCI_CAPABILITIES), 3992 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 3993 3994 sdhci_read_caps(host); 3995 3996 override_timeout_clk = host->timeout_clk; 3997 3998 if (host->version > SDHCI_SPEC_420) { 3999 pr_err("%s: Unknown controller version (%d). You may experience problems.\n", 4000 mmc_hostname(mmc), host->version); 4001 } 4002 4003 if (host->quirks & SDHCI_QUIRK_BROKEN_CQE) 4004 mmc->caps2 &= ~MMC_CAP2_CQE; 4005 4006 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 4007 host->flags |= SDHCI_USE_SDMA; 4008 else if (!(host->caps & SDHCI_CAN_DO_SDMA)) 4009 DBG("Controller doesn't have SDMA capability\n"); 4010 else 4011 host->flags |= SDHCI_USE_SDMA; 4012 4013 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 4014 (host->flags & SDHCI_USE_SDMA)) { 4015 DBG("Disabling DMA as it is marked broken\n"); 4016 host->flags &= ~SDHCI_USE_SDMA; 4017 } 4018 4019 if ((host->version >= SDHCI_SPEC_200) && 4020 (host->caps & SDHCI_CAN_DO_ADMA2)) 4021 host->flags |= SDHCI_USE_ADMA; 4022 4023 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 4024 (host->flags & SDHCI_USE_ADMA)) { 4025 DBG("Disabling ADMA as it is marked broken\n"); 4026 host->flags &= ~SDHCI_USE_ADMA; 4027 } 4028 4029 if (sdhci_can_64bit_dma(host)) 4030 host->flags |= SDHCI_USE_64_BIT_DMA; 4031 4032 if (host->use_external_dma) { 4033 ret = sdhci_external_dma_init(host); 4034 if (ret == -EPROBE_DEFER) 4035 goto unreg; 4036 /* 4037 * Fall back to use the DMA/PIO integrated in standard SDHCI 4038 * instead of external DMA devices. 4039 */ 4040 else if (ret) 4041 sdhci_switch_external_dma(host, false); 4042 /* Disable internal DMA sources */ 4043 else 4044 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 4045 } 4046 4047 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 4048 if (host->ops->set_dma_mask) 4049 ret = host->ops->set_dma_mask(host); 4050 else 4051 ret = sdhci_set_dma_mask(host); 4052 4053 if (!ret && host->ops->enable_dma) 4054 ret = host->ops->enable_dma(host); 4055 4056 if (ret) { 4057 pr_warn("%s: No suitable DMA available - falling back to PIO\n", 4058 mmc_hostname(mmc)); 4059 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 4060 4061 ret = 0; 4062 } 4063 } 4064 4065 /* SDMA does not support 64-bit DMA if v4 mode not set */ 4066 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode) 4067 host->flags &= ~SDHCI_USE_SDMA; 4068 4069 if (host->flags & SDHCI_USE_ADMA) { 4070 dma_addr_t dma; 4071 void *buf; 4072 4073 if (!(host->flags & SDHCI_USE_64_BIT_DMA)) 4074 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ; 4075 else if (!host->alloc_desc_sz) 4076 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); 4077 4078 host->desc_sz = host->alloc_desc_sz; 4079 host->adma_table_sz = host->adma_table_cnt * host->desc_sz; 4080 4081 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; 4082 /* 4083 * Use zalloc to zero the reserved high 32-bits of 128-bit 4084 * descriptors so that they never need to be written. 
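 * The single coherent allocation below is split into two regions,
 * [alignment buffer | ADMA descriptor table]; see the host->adma_table and
 * host->adma_addr assignments at the end of the else branch.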
	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		if (!(host->flags & SDHCI_USE_64_BIT_DMA))
			host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		else if (!host->alloc_desc_sz)
			host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);

		host->desc_sz = host->alloc_desc_sz;
		host->adma_table_sz = host->adma_table_cnt * host->desc_sz;

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		/*
		 * Use zalloc to zero the reserved high 32-bits of 128-bit
		 * descriptors so that they never need to be written.
		 */
		buf = dma_alloc_coherent(mmc_dev(mmc),
					 host->align_buffer_sz + host->adma_table_sz,
					 &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

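	/*
	 * The base clock frequency in the capabilities register is encoded
	 * in MHz (an 8-bit field for v3.00+, 6 bits before that), hence the
	 * conversion to Hz below.  A zero value, or the
	 * SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN quirk, means the controller does
	 * not advertise a usable base clock and the host driver must supply
	 * one via the ->get_max_clock() callback.
	 */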
	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul)
			max_clk = host->max_clk * host->clk_mul;
		/*
		 * Divided Clock Mode minimum clock rate is always less than
		 * Programmable Clock Mode minimum clock rate.
		 */
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;

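	/*
	 * host->timeout_clk is kept in kHz: the capabilities field is in kHz
	 * or MHz depending on SDHCI_TIMEOUT_CLK_UNIT, and
	 * ->get_timeout_clock() returns Hz, hence the conversions below.
	 * mmc->max_busy_timeout ends up in milliseconds (maximum timeout
	 * count in clock cycles divided by the timeout clock in kHz).
	 */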
	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;

		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (host->timeout_clk == 0) {
			if (!host->ops->get_timeout_clock) {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}

			host->timeout_clk =
				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
					     1000);
		}

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}

	if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
	    !host->ops->get_max_timeout_count)
		mmc->max_busy_timeout = 0;

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/*
	 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
	 * For v4 mode, SDMA may use Auto-CMD23 as well.
	 */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("Auto-CMD23 available\n");
	} else {
		DBG("Auto-CMD23 unavailable\n");
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out. Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(host->mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

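	/*
	 * vqmmc supplies the card's I/O lines.  The voltage range checks
	 * below decide which signalling levels the board can actually use:
	 * no 1.7-1.95V support means no UHS-I modes, and no 2.7-3.6V support
	 * (e.g. a fixed 1.8V eMMC rail) means 3.3V signalling must be
	 * dropped.
	 */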
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);

		/* If vqmmc provides no 1.8V signalling, then there's no UHS */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);

		/* In eMMC case vqmmc might be a fixed 1.8V regulator */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
						    3600000))
			host->flags &= ~SDHCI_SIGNALING_330;

		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
		/*
		 * The SDHCI controller in a SoC might support HS200/HS400
		 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
		 * but if the board is modeled such that the IO lines are not
		 * connected to 1.8V then HS200/HS400 cannot be supported.
		 * Disable HS200/HS400 if the board does not have 1.8V
		 * connected to the IO lines. (Applicable for other modes in
		 * 1.8V)
		 */
		mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
		mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;

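	/*
	 * Build the OCR mask from the VDD capability bits below.  When the
	 * MAX_CURRENT register reads zero, an equivalent value is derived
	 * from the vmmc regulator's current limit, converted into the
	 * register's 4 mA-per-step format.
	 */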
	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
		if (swiotlb_max_segment()) {
			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
						IO_TLB_SEGSIZE;
			mmc->max_req_size = min(mmc->max_req_size,
						max_req_size);
		}
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
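	/* The capabilities field encodes 0 = 512, 1 = 1024 and 2 = 2048 bytes; 3 is reserved. */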
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	if (mmc->max_segs == 1)
		/* This may alter mmc->*_blk_* parameters */
		sdhci_allocate_bounce_buffer(host);

	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	if (host->use_external_dma)
		sdhci_external_dma_release(host);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);

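/*
 * __sdhci_add_host - second stage of host registration
 *
 * Allocates the completion workqueue, arms the request and data timers,
 * requests the shared threaded interrupt, registers the LED trigger and
 * finally registers the mmc_host with the core.  On failure everything set
 * up here is torn down again; state created by sdhci_setup_host() must be
 * undone by the caller via sdhci_cleanup_host().
 */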
"DMA" : "PIO"); 4580 4581 sdhci_enable_card_detection(host); 4582 4583 return 0; 4584 4585 unled: 4586 sdhci_led_unregister(host); 4587 unirq: 4588 sdhci_do_reset(host, SDHCI_RESET_ALL); 4589 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4590 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4591 free_irq(host->irq, host); 4592 unwq: 4593 destroy_workqueue(host->complete_wq); 4594 4595 return ret; 4596 } 4597 EXPORT_SYMBOL_GPL(__sdhci_add_host); 4598 4599 int sdhci_add_host(struct sdhci_host *host) 4600 { 4601 int ret; 4602 4603 ret = sdhci_setup_host(host); 4604 if (ret) 4605 return ret; 4606 4607 ret = __sdhci_add_host(host); 4608 if (ret) 4609 goto cleanup; 4610 4611 return 0; 4612 4613 cleanup: 4614 sdhci_cleanup_host(host); 4615 4616 return ret; 4617 } 4618 EXPORT_SYMBOL_GPL(sdhci_add_host); 4619 4620 void sdhci_remove_host(struct sdhci_host *host, int dead) 4621 { 4622 struct mmc_host *mmc = host->mmc; 4623 unsigned long flags; 4624 4625 if (dead) { 4626 spin_lock_irqsave(&host->lock, flags); 4627 4628 host->flags |= SDHCI_DEVICE_DEAD; 4629 4630 if (sdhci_has_requests(host)) { 4631 pr_err("%s: Controller removed during " 4632 " transfer!\n", mmc_hostname(mmc)); 4633 sdhci_error_out_mrqs(host, -ENOMEDIUM); 4634 } 4635 4636 spin_unlock_irqrestore(&host->lock, flags); 4637 } 4638 4639 sdhci_disable_card_detection(host); 4640 4641 mmc_remove_host(mmc); 4642 4643 sdhci_led_unregister(host); 4644 4645 if (!dead) 4646 sdhci_do_reset(host, SDHCI_RESET_ALL); 4647 4648 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4649 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4650 free_irq(host->irq, host); 4651 4652 del_timer_sync(&host->timer); 4653 del_timer_sync(&host->data_timer); 4654 4655 destroy_workqueue(host->complete_wq); 4656 4657 if (!IS_ERR(mmc->supply.vqmmc)) 4658 regulator_disable(mmc->supply.vqmmc); 4659 4660 if (host->align_buffer) 4661 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4662 host->adma_table_sz, host->align_buffer, 4663 host->align_addr); 4664 4665 if (host->use_external_dma) 4666 sdhci_external_dma_release(host); 4667 4668 host->adma_table = NULL; 4669 host->align_buffer = NULL; 4670 } 4671 4672 EXPORT_SYMBOL_GPL(sdhci_remove_host); 4673 4674 void sdhci_free_host(struct sdhci_host *host) 4675 { 4676 mmc_free_host(host->mmc); 4677 } 4678 4679 EXPORT_SYMBOL_GPL(sdhci_free_host); 4680 4681 /*****************************************************************************\ 4682 * * 4683 * Driver init/exit * 4684 * * 4685 \*****************************************************************************/ 4686 4687 static int __init sdhci_drv_init(void) 4688 { 4689 pr_info(DRIVER_NAME 4690 ": Secure Digital Host Controller Interface driver\n"); 4691 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 4692 4693 return 0; 4694 } 4695 4696 static void __exit sdhci_drv_exit(void) 4697 { 4698 } 4699 4700 module_init(sdhci_drv_init); 4701 module_exit(sdhci_drv_exit); 4702 4703 module_param(debug_quirks, uint, 0444); 4704 module_param(debug_quirks2, uint, 0444); 4705 4706 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 4707 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 4708 MODULE_LICENSE("GPL"); 4709 4710 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 4711 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 4712