/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_get_cd(struct mmc_host *mmc);

static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
		mmc_hostname(host->mmc));

	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
		sdhci_readl(host, SDHCI_DMA_ADDRESS),
		sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		sdhci_readw(host, SDHCI_BLOCK_SIZE),
		sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		sdhci_readl(host, SDHCI_ARGUMENT),
		sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
		sdhci_readl(host, SDHCI_PRESENT_STATE),
		sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
		sdhci_readb(host, SDHCI_POWER_CONTROL),
		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		sdhci_readl(host, SDHCI_INT_STATUS));
	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		sdhci_readl(host, SDHCI_INT_ENABLE),
		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		sdhci_readw(host, SDHCI_ACMD12_ERR),
		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
		sdhci_readl(host, SDHCI_CAPABILITIES),
		sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
		sdhci_readw(host, SDHCI_COMMAND),
		sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
		sdhci_readw(host, SDHCI_HOST_CONTROL2));
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				readl(host->ioaddr + SDHCI_ADMA_ERROR),
				readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
				readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
		else
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				readl(host->ioaddr + SDHCI_ADMA_ERROR),
				readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
	}

	pr_debug(DRIVER_NAME ": ===========================================\n");
}

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!sdhci_get_cd(host->mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/*
		 * Resetting the controller clears many registers, so the
		 * cached preset-enable state is no longer valid.
		 */
		host->preset_enabled = false;
	}
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
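/*
 * Bring the controller to a known state: a soft init only resets the
 * CMD and DATA circuits, while a full init issues SDHCI_RESET_ALL.
 * Either way the default interrupt mask is (re)programmed afterwards.
 */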
static void sdhci_init(struct sdhci_host *host, int soft)
{
	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		sdhci_set_ios(host->mmc, &host->mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
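/*
 * DMA mapping bookkeeping: mrq->data->host_cookie tracks whether the
 * scatterlist is currently mapped. ->pre_req() maps it ahead of time
 * with COOKIE_PRE_MAPPED; the request path maps on demand with
 * COOKIE_MAPPED and unmaps again when the request finishes, while
 * pre-mapped buffers are left for ->post_req() to unmap.
 */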
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			      data->flags & MMC_DATA_WRITE ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
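		/*
		 * Illustrative example (assuming SDHCI_ADMA2_ALIGN == 4 and
		 * SDHCI_ADMA2_MASK == 3): a segment at DMA address 0x1002
		 * gives offset = (4 - 2) & 3 = 2, so the first two bytes go
		 * through the bounce buffer and the rest of the segment is
		 * described from the aligned address 0x1004.
		 */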
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000 * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
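	/*
	 * Worked example with illustrative numbers: for timeout_clk =
	 * 52000 (kHz, i.e. 52 MHz) the initial current_timeout below is
	 * 2^13 * 1000 / 52000 ~= 157 us; a 100 ms target then needs ten
	 * doublings (count = 10), i.e. 2^(10+13) TMCLK cycles ~= 161 ms.
	 */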
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	WARN_ON(host->data);

	if (data || (cmd->flags & MMC_RSP_BUSY))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}
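	/*
	 * At this point SDHCI_REQ_USE_DMA holds the final verdict: it may
	 * have been cleared above when a quirk rules out this scatterlist.
	 * For DMA we now map the buffers and program the SDMA or ADMA
	 * address registers; otherwise the PIO state is set up further on.
	 */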
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				     SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
		    (host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		     data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
		    SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				     SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
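/*
 * Called once the data phase is over: either from the interrupt path
 * when the transfer completes, or deferred via host->data_early when
 * the data-end interrupt arrived before the command response.
 */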
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	mod_timer(&host->timer, timeout);

	host->cmd = cmd;
	host->busy_handle = 0;

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
		       mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
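			/*
			 * The 136-bit response arrives in the RESPONSE
			 * registers with its CRC byte already stripped,
			 * so each 32-bit word is shifted left by 8 and
			 * topped up with the high byte of the word below.
			 */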
			for (i = 0; i < 4; i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	/* Finished CMD23, now send actual command. */
	if (host->cmd == host->mrq->sbc) {
		host->cmd = NULL;
		sdhci_send_command(host, host->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!host->cmd->data)
			tasklet_schedule(&host->finish_tasklet);

		host->cmd = NULL;
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
			    (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
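			/*
			 * Worked example with illustrative numbers: asking
			 * for a 400 kHz bus on max_clk = 100 MHz stops the
			 * loop below at div = 250 (100 MHz / 250 = 400 kHz);
			 * div is then halved to 125 before being split over
			 * the low 8 and high 2 divider bits at clock_set.
			 */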
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
			    && !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;
	unsigned long timeout;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	spin_unlock_irq(&host->lock);
	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
	spin_lock_irq(&host->lock);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on bus power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before they
		 * can apply the clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
			      unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	if (host->ops->set_power)
		host->ops->set_power(host, mode, vdd);
	else if (!IS_ERR(mmc->supply.vmmc))
		sdhci_set_power_reg(host, mode, vdd);
	else
		sdhci_set_power(host, mode, vdd);
}

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* First, check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq = mrq;

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
	    (ios->power_mode == MMC_POWER_UP) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	__sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if ((ios->timing == MMC_TIMING_MMC_HS400) ||
		    (ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_MMC_DDR52) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
			ctrl |= SDHCI_CTRL_HISPD;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
		    ((ios->timing == MMC_TIMING_UHS_SDR12) ||
		     (ios->timing == MMC_TIMING_UHS_SDR25) ||
		     (ios->timing == MMC_TIMING_UHS_SDR50) ||
		     (ios->timing == MMC_TIMING_UHS_SDR104) ||
		     (ios->timing == MMC_TIMING_UHS_DDR50) ||
		     (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
		return 1;

	/*
	 * Try slot GPIO detect; if defined, it takes precedence
	 * over the built-in controller functionality.
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
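/*
 * Read back the write-protect state. The SDHCI_WRITE_PROTECT bit in
 * PRESENT_STATE reads as 1 when writing is permitted, hence the
 * inversion below; hosts with quirky detection override this via
 * ops->get_ro or SDHCI_QUIRK_INVERTED_WRITE_PROTECT.
 */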
static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

#define SAMPLE_COUNT 5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}

static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
					     struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
						    3600000);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1700000, 1950000);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
						    1300000);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}

static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[3:0] is 0000 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_LVL_MASK);
}

static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int tuning_loop_counter = MAX_TUNING_LOOP;
	int err = 0;
	unsigned long flags;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	spin_lock_irqsave(&host->lock, flags);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	host->flags &= ~SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out_unlock;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out_unlock;
	}

	if (host->ops->platform_execute_tuning) {
		spin_unlock_irqrestore(&host->lock, flags);
		err = host->ops->platform_execute_tuning(host, opcode);
		return err;
	}

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

	/*
	 * Issue CMD19 repeatedly until Execute Tuning is set to 0, the
	 * number of loops reaches 40 times or a timeout of 150ms occurs.
	 */
	do {
		struct mmc_command cmd = {0};
		struct mmc_request mrq = {NULL};

		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		cmd.retries = 0;
		cmd.data = NULL;
		cmd.error = 0;

		if (tuning_loop_counter-- == 0)
			break;

		mrq.cmd = &cmd;
		host->mrq = &mrq;

		/*
		 * In response to CMD19, the card sends 64 bytes of tuning
		 * block to the Host Controller, so we set the block size
		 * to 64 here (128 bytes for the 8-bit wide CMD21 tuning
		 * block used in HS200 mode).
		 */
		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
					     SDHCI_BLOCK_SIZE);
			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
					     SDHCI_BLOCK_SIZE);
		} else {
			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
				     SDHCI_BLOCK_SIZE);
		}

		/*
		 * The tuning block is sent by the card to the host controller.
		 * So we set the TRNS_READ bit in the Transfer Mode register.
		 * This also takes care of setting DMA Enable and Multi Block
		 * Select in the same register to 0.
		 */
		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

		sdhci_send_command(host, &cmd);
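		/*
		 * Tuning commands complete via the Buffer Read Ready
		 * interrupt rather than the normal command-done path, so
		 * drop the bookkeeping pointers before sleeping on
		 * buf_ready_int below.
		 */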
1953 */ 1954 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 1955 1956 sdhci_send_command(host, &cmd); 1957 1958 host->cmd = NULL; 1959 host->mrq = NULL; 1960 1961 spin_unlock_irqrestore(&host->lock, flags); 1962 /* Wait for Buffer Read Ready interrupt */ 1963 wait_event_interruptible_timeout(host->buf_ready_int, 1964 (host->tuning_done == 1), 1965 msecs_to_jiffies(50)); 1966 spin_lock_irqsave(&host->lock, flags); 1967 1968 if (!host->tuning_done) { 1969 pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n"); 1970 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1971 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 1972 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 1973 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1974 1975 err = -EIO; 1976 goto out; 1977 } 1978 1979 host->tuning_done = 0; 1980 1981 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1982 1983 /* eMMC spec does not require a delay between tuning cycles */ 1984 if (opcode == MMC_SEND_TUNING_BLOCK) 1985 mdelay(1); 1986 } while (ctrl & SDHCI_CTRL_EXEC_TUNING); 1987 1988 /* 1989 * The Host Driver has exhausted the maximum number of loops allowed, 1990 * so use fixed sampling frequency. 1991 */ 1992 if (tuning_loop_counter < 0) { 1993 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 1994 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1995 } 1996 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { 1997 pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n"); 1998 err = -EIO; 1999 } 2000 2001 out: 2002 if (tuning_count) { 2003 /* 2004 * In case tuning fails, host controllers which support 2005 * re-tuning can try tuning again at a later time, when the 2006 * re-tuning timer expires. So for these controllers, we 2007 * return 0. Since there might be other controllers who do not 2008 * have this capability, we return error for them. 2009 */ 2010 err = 0; 2011 } 2012 2013 host->mmc->retune_period = err ? 0 : tuning_count; 2014 2015 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2016 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2017 out_unlock: 2018 spin_unlock_irqrestore(&host->lock, flags); 2019 return err; 2020 } 2021 2022 static int sdhci_select_drive_strength(struct mmc_card *card, 2023 unsigned int max_dtr, int host_drv, 2024 int card_drv, int *drv_type) 2025 { 2026 struct sdhci_host *host = mmc_priv(card->host); 2027 2028 if (!host->ops->select_drive_strength) 2029 return 0; 2030 2031 return host->ops->select_drive_strength(host, card, max_dtr, host_drv, 2032 card_drv, drv_type); 2033 } 2034 2035 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2036 { 2037 /* Host Controller v3.00 defines preset value registers */ 2038 if (host->version < SDHCI_SPEC_300) 2039 return; 2040 2041 /* 2042 * We only enable or disable Preset Value if they are not already 2043 * enabled or disabled respectively. Otherwise, we bail out. 
2044 */ 2045 if (host->preset_enabled != enable) { 2046 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2047 2048 if (enable) 2049 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2050 else 2051 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2052 2053 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2054 2055 if (enable) 2056 host->flags |= SDHCI_PV_ENABLED; 2057 else 2058 host->flags &= ~SDHCI_PV_ENABLED; 2059 2060 host->preset_enabled = enable; 2061 } 2062 } 2063 2064 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 2065 int err) 2066 { 2067 struct sdhci_host *host = mmc_priv(mmc); 2068 struct mmc_data *data = mrq->data; 2069 2070 if (data->host_cookie != COOKIE_UNMAPPED) 2071 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 2072 data->flags & MMC_DATA_WRITE ? 2073 DMA_TO_DEVICE : DMA_FROM_DEVICE); 2074 2075 data->host_cookie = COOKIE_UNMAPPED; 2076 } 2077 2078 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, 2079 bool is_first_req) 2080 { 2081 struct sdhci_host *host = mmc_priv(mmc); 2082 2083 mrq->data->host_cookie = COOKIE_UNMAPPED; 2084 2085 if (host->flags & SDHCI_REQ_USE_DMA) 2086 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 2087 } 2088 2089 static void sdhci_card_event(struct mmc_host *mmc) 2090 { 2091 struct sdhci_host *host = mmc_priv(mmc); 2092 unsigned long flags; 2093 int present; 2094 2095 /* First check if client has provided their own card event */ 2096 if (host->ops->card_event) 2097 host->ops->card_event(host); 2098 2099 present = sdhci_get_cd(host->mmc); 2100 2101 spin_lock_irqsave(&host->lock, flags); 2102 2103 /* Check host->mrq first in case we are runtime suspended */ 2104 if (host->mrq && !present) { 2105 pr_err("%s: Card removed during transfer!\n", 2106 mmc_hostname(host->mmc)); 2107 pr_err("%s: Resetting controller.\n", 2108 mmc_hostname(host->mmc)); 2109 2110 sdhci_do_reset(host, SDHCI_RESET_CMD); 2111 sdhci_do_reset(host, SDHCI_RESET_DATA); 2112 2113 host->mrq->cmd->error = -ENOMEDIUM; 2114 tasklet_schedule(&host->finish_tasklet); 2115 } 2116 2117 spin_unlock_irqrestore(&host->lock, flags); 2118 } 2119 2120 static const struct mmc_host_ops sdhci_ops = { 2121 .request = sdhci_request, 2122 .post_req = sdhci_post_req, 2123 .pre_req = sdhci_pre_req, 2124 .set_ios = sdhci_set_ios, 2125 .get_cd = sdhci_get_cd, 2126 .get_ro = sdhci_get_ro, 2127 .hw_reset = sdhci_hw_reset, 2128 .enable_sdio_irq = sdhci_enable_sdio_irq, 2129 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 2130 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 2131 .execute_tuning = sdhci_execute_tuning, 2132 .select_drive_strength = sdhci_select_drive_strength, 2133 .card_event = sdhci_card_event, 2134 .card_busy = sdhci_card_busy, 2135 }; 2136 2137 /*****************************************************************************\ 2138 * * 2139 * Tasklets * 2140 * * 2141 \*****************************************************************************/ 2142 2143 static void sdhci_tasklet_finish(unsigned long param) 2144 { 2145 struct sdhci_host *host; 2146 unsigned long flags; 2147 struct mmc_request *mrq; 2148 2149 host = (struct sdhci_host*)param; 2150 2151 spin_lock_irqsave(&host->lock, flags); 2152 2153 /* 2154 * If this tasklet gets rescheduled while running, it will 2155 * be run again afterwards but without any active request. 
2156 */ 2157 if (!host->mrq) { 2158 spin_unlock_irqrestore(&host->lock, flags); 2159 return; 2160 } 2161 2162 del_timer(&host->timer); 2163 2164 mrq = host->mrq; 2165 2166 /* 2167 * Always unmap the data buffers if they were mapped by 2168 * sdhci_prepare_data() whenever we finish with a request. 2169 * This avoids leaking DMA mappings on error. 2170 */ 2171 if (host->flags & SDHCI_REQ_USE_DMA) { 2172 struct mmc_data *data = mrq->data; 2173 2174 if (data && data->host_cookie == COOKIE_MAPPED) { 2175 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 2176 (data->flags & MMC_DATA_READ) ? 2177 DMA_FROM_DEVICE : DMA_TO_DEVICE); 2178 data->host_cookie = COOKIE_UNMAPPED; 2179 } 2180 } 2181 2182 /* 2183 * The controller needs a reset of internal state machines 2184 * upon error conditions. 2185 */ 2186 if (!(host->flags & SDHCI_DEVICE_DEAD) && 2187 ((mrq->cmd && mrq->cmd->error) || 2188 (mrq->sbc && mrq->sbc->error) || 2189 (mrq->data && ((mrq->data->error && !mrq->data->stop) || 2190 (mrq->data->stop && mrq->data->stop->error))) || 2191 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { 2192 2193 /* Some controllers need this kick or reset won't work here */ 2194 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 2195 /* This is to force an update */ 2196 host->ops->set_clock(host, host->clock); 2197 2198 /* Spec says we should do both at the same time, but Ricoh 2199 controllers do not like that. */ 2200 sdhci_do_reset(host, SDHCI_RESET_CMD); 2201 sdhci_do_reset(host, SDHCI_RESET_DATA); 2202 } 2203 2204 host->mrq = NULL; 2205 host->cmd = NULL; 2206 host->data = NULL; 2207 2208 sdhci_led_deactivate(host); 2209 2210 mmiowb(); 2211 spin_unlock_irqrestore(&host->lock, flags); 2212 2213 mmc_request_done(host->mmc, mrq); 2214 } 2215 2216 static void sdhci_timeout_timer(unsigned long data) 2217 { 2218 struct sdhci_host *host; 2219 unsigned long flags; 2220 2221 host = (struct sdhci_host*)data; 2222 2223 spin_lock_irqsave(&host->lock, flags); 2224 2225 if (host->mrq) { 2226 pr_err("%s: Timeout waiting for hardware interrupt.\n", 2227 mmc_hostname(host->mmc)); 2228 sdhci_dumpregs(host); 2229 2230 if (host->data) { 2231 host->data->error = -ETIMEDOUT; 2232 sdhci_finish_data(host); 2233 } else { 2234 if (host->cmd) 2235 host->cmd->error = -ETIMEDOUT; 2236 else 2237 host->mrq->cmd->error = -ETIMEDOUT; 2238 2239 tasklet_schedule(&host->finish_tasklet); 2240 } 2241 } 2242 2243 mmiowb(); 2244 spin_unlock_irqrestore(&host->lock, flags); 2245 } 2246 2247 /*****************************************************************************\ 2248 * * 2249 * Interrupt handling * 2250 * * 2251 \*****************************************************************************/ 2252 2253 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) 2254 { 2255 BUG_ON(intmask == 0); 2256 2257 if (!host->cmd) { 2258 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 2259 mmc_hostname(host->mmc), (unsigned)intmask); 2260 sdhci_dumpregs(host); 2261 return; 2262 } 2263 2264 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 2265 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 2266 if (intmask & SDHCI_INT_TIMEOUT) 2267 host->cmd->error = -ETIMEDOUT; 2268 else 2269 host->cmd->error = -EILSEQ; 2270 2271 /* 2272 * If this command initiates a data phase and a response 2273 * CRC error is signalled, the card can start transferring 2274 * data - the card may have received the command without 2275 * error. We must not terminate the mmc_request early. 
2276 * 2277 * If the card did not receive the command or returned an 2278 * error which prevented it sending data, the data phase 2279 * will time out. 2280 */ 2281 if (host->cmd->data && 2282 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 2283 SDHCI_INT_CRC) { 2284 host->cmd = NULL; 2285 return; 2286 } 2287 2288 tasklet_schedule(&host->finish_tasklet); 2289 return; 2290 } 2291 2292 /* 2293 * The host can send and interrupt when the busy state has 2294 * ended, allowing us to wait without wasting CPU cycles. 2295 * Unfortunately this is overloaded on the "data complete" 2296 * interrupt, so we need to take some care when handling 2297 * it. 2298 * 2299 * Note: The 1.0 specification is a bit ambiguous about this 2300 * feature so there might be some problems with older 2301 * controllers. 2302 */ 2303 if (host->cmd->flags & MMC_RSP_BUSY) { 2304 if (host->cmd->data) 2305 DBG("Cannot wait for busy signal when also doing a data transfer"); 2306 else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) 2307 && !host->busy_handle) { 2308 /* Mark that command complete before busy is ended */ 2309 host->busy_handle = 1; 2310 return; 2311 } 2312 2313 /* The controller does not support the end-of-busy IRQ, 2314 * fall through and take the SDHCI_INT_RESPONSE */ 2315 } else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && 2316 host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) { 2317 *mask &= ~SDHCI_INT_DATA_END; 2318 } 2319 2320 if (intmask & SDHCI_INT_RESPONSE) 2321 sdhci_finish_command(host); 2322 } 2323 2324 #ifdef CONFIG_MMC_DEBUG 2325 static void sdhci_adma_show_error(struct sdhci_host *host) 2326 { 2327 const char *name = mmc_hostname(host->mmc); 2328 void *desc = host->adma_table; 2329 2330 sdhci_dumpregs(host); 2331 2332 while (true) { 2333 struct sdhci_adma2_64_desc *dma_desc = desc; 2334 2335 if (host->flags & SDHCI_USE_64_BIT_DMA) 2336 DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 2337 name, desc, le32_to_cpu(dma_desc->addr_hi), 2338 le32_to_cpu(dma_desc->addr_lo), 2339 le16_to_cpu(dma_desc->len), 2340 le16_to_cpu(dma_desc->cmd)); 2341 else 2342 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 2343 name, desc, le32_to_cpu(dma_desc->addr_lo), 2344 le16_to_cpu(dma_desc->len), 2345 le16_to_cpu(dma_desc->cmd)); 2346 2347 desc += host->desc_sz; 2348 2349 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 2350 break; 2351 } 2352 } 2353 #else 2354 static void sdhci_adma_show_error(struct sdhci_host *host) { } 2355 #endif 2356 2357 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 2358 { 2359 u32 command; 2360 BUG_ON(intmask == 0); 2361 2362 /* CMD19 generates _only_ Buffer Read Ready interrupt */ 2363 if (intmask & SDHCI_INT_DATA_AVAIL) { 2364 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); 2365 if (command == MMC_SEND_TUNING_BLOCK || 2366 command == MMC_SEND_TUNING_BLOCK_HS200) { 2367 host->tuning_done = 1; 2368 wake_up(&host->buf_ready_int); 2369 return; 2370 } 2371 } 2372 2373 if (!host->data) { 2374 /* 2375 * The "data complete" interrupt is also used to 2376 * indicate that a busy state has ended. See comment 2377 * above in sdhci_cmd_irq(). 2378 */ 2379 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) { 2380 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 2381 host->cmd->error = -ETIMEDOUT; 2382 tasklet_schedule(&host->finish_tasklet); 2383 return; 2384 } 2385 if (intmask & SDHCI_INT_DATA_END) { 2386 /* 2387 * Some cards handle busy-end interrupt 2388 * before the command completed, so make 2389 * sure we do things in the proper order. 
2390 */ 2391 if (host->busy_handle) 2392 sdhci_finish_command(host); 2393 else 2394 host->busy_handle = 1; 2395 return; 2396 } 2397 } 2398 2399 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 2400 mmc_hostname(host->mmc), (unsigned)intmask); 2401 sdhci_dumpregs(host); 2402 2403 return; 2404 } 2405 2406 if (intmask & SDHCI_INT_DATA_TIMEOUT) 2407 host->data->error = -ETIMEDOUT; 2408 else if (intmask & SDHCI_INT_DATA_END_BIT) 2409 host->data->error = -EILSEQ; 2410 else if ((intmask & SDHCI_INT_DATA_CRC) && 2411 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 2412 != MMC_BUS_TEST_R) 2413 host->data->error = -EILSEQ; 2414 else if (intmask & SDHCI_INT_ADMA_ERROR) { 2415 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); 2416 sdhci_adma_show_error(host); 2417 host->data->error = -EIO; 2418 if (host->ops->adma_workaround) 2419 host->ops->adma_workaround(host, intmask); 2420 } 2421 2422 if (host->data->error) 2423 sdhci_finish_data(host); 2424 else { 2425 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 2426 sdhci_transfer_pio(host); 2427 2428 /* 2429 * We currently don't do anything fancy with DMA 2430 * boundaries, but as we can't disable the feature 2431 * we need to at least restart the transfer. 2432 * 2433 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 2434 * should return a valid address to continue from, but as 2435 * some controllers are faulty, don't trust them. 2436 */ 2437 if (intmask & SDHCI_INT_DMA_END) { 2438 u32 dmastart, dmanow; 2439 dmastart = sg_dma_address(host->data->sg); 2440 dmanow = dmastart + host->data->bytes_xfered; 2441 /* 2442 * Force update to the next DMA block boundary. 2443 */ 2444 dmanow = (dmanow & 2445 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 2446 SDHCI_DEFAULT_BOUNDARY_SIZE; 2447 host->data->bytes_xfered = dmanow - dmastart; 2448 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes," 2449 " next 0x%08x\n", 2450 mmc_hostname(host->mmc), dmastart, 2451 host->data->bytes_xfered, dmanow); 2452 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); 2453 } 2454 2455 if (intmask & SDHCI_INT_DATA_END) { 2456 if (host->cmd) { 2457 /* 2458 * Data managed to finish before the 2459 * command completed. Make sure we do 2460 * things in the proper order. 2461 */ 2462 host->data_early = 1; 2463 } else { 2464 sdhci_finish_data(host); 2465 } 2466 } 2467 } 2468 } 2469 2470 static irqreturn_t sdhci_irq(int irq, void *dev_id) 2471 { 2472 irqreturn_t result = IRQ_NONE; 2473 struct sdhci_host *host = dev_id; 2474 u32 intmask, mask, unexpected = 0; 2475 int max_loops = 16; 2476 2477 spin_lock(&host->lock); 2478 2479 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) { 2480 spin_unlock(&host->lock); 2481 return IRQ_NONE; 2482 } 2483 2484 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 2485 if (!intmask || intmask == 0xffffffff) { 2486 result = IRQ_NONE; 2487 goto out; 2488 } 2489 2490 do { 2491 /* Clear selected interrupts. */ 2492 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 2493 SDHCI_INT_BUS_POWER); 2494 sdhci_writel(host, mask, SDHCI_INT_STATUS); 2495 2496 DBG("*** %s got interrupt: 0x%08x\n", 2497 mmc_hostname(host->mmc), intmask); 2498 2499 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 2500 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 2501 SDHCI_CARD_PRESENT; 2502 2503 /* 2504 * There is a observation on i.mx esdhc. INSERT 2505 * bit will be immediately set again when it gets 2506 * cleared, if a card is inserted. 
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		sdhci_card_event(host->mmc);
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ?
IRQ_HANDLED : IRQ_NONE; 2599 } 2600 2601 /*****************************************************************************\ 2602 * * 2603 * Suspend/resume * 2604 * * 2605 \*****************************************************************************/ 2606 2607 #ifdef CONFIG_PM 2608 void sdhci_enable_irq_wakeups(struct sdhci_host *host) 2609 { 2610 u8 val; 2611 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 2612 | SDHCI_WAKE_ON_INT; 2613 2614 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 2615 val |= mask ; 2616 /* Avoid fake wake up */ 2617 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2618 val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE); 2619 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 2620 } 2621 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); 2622 2623 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 2624 { 2625 u8 val; 2626 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 2627 | SDHCI_WAKE_ON_INT; 2628 2629 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 2630 val &= ~mask; 2631 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 2632 } 2633 2634 int sdhci_suspend_host(struct sdhci_host *host) 2635 { 2636 sdhci_disable_card_detection(host); 2637 2638 mmc_retune_timer_stop(host->mmc); 2639 mmc_retune_needed(host->mmc); 2640 2641 if (!device_may_wakeup(mmc_dev(host->mmc))) { 2642 host->ier = 0; 2643 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 2644 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 2645 free_irq(host->irq, host); 2646 } else { 2647 sdhci_enable_irq_wakeups(host); 2648 enable_irq_wake(host->irq); 2649 } 2650 return 0; 2651 } 2652 2653 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 2654 2655 int sdhci_resume_host(struct sdhci_host *host) 2656 { 2657 int ret = 0; 2658 2659 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 2660 if (host->ops->enable_dma) 2661 host->ops->enable_dma(host); 2662 } 2663 2664 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && 2665 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 2666 /* Card keeps power but host controller does not */ 2667 sdhci_init(host, 0); 2668 host->pwr = 0; 2669 host->clock = 0; 2670 sdhci_set_ios(host->mmc, &host->mmc->ios); 2671 } else { 2672 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); 2673 mmiowb(); 2674 } 2675 2676 if (!device_may_wakeup(mmc_dev(host->mmc))) { 2677 ret = request_threaded_irq(host->irq, sdhci_irq, 2678 sdhci_thread_irq, IRQF_SHARED, 2679 mmc_hostname(host->mmc), host); 2680 if (ret) 2681 return ret; 2682 } else { 2683 sdhci_disable_irq_wakeups(host); 2684 disable_irq_wake(host->irq); 2685 } 2686 2687 sdhci_enable_card_detection(host); 2688 2689 return ret; 2690 } 2691 2692 EXPORT_SYMBOL_GPL(sdhci_resume_host); 2693 2694 int sdhci_runtime_suspend_host(struct sdhci_host *host) 2695 { 2696 unsigned long flags; 2697 2698 mmc_retune_timer_stop(host->mmc); 2699 mmc_retune_needed(host->mmc); 2700 2701 spin_lock_irqsave(&host->lock, flags); 2702 host->ier &= SDHCI_INT_CARD_INT; 2703 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2704 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2705 spin_unlock_irqrestore(&host->lock, flags); 2706 2707 synchronize_hardirq(host->irq); 2708 2709 spin_lock_irqsave(&host->lock, flags); 2710 host->runtime_suspended = true; 2711 spin_unlock_irqrestore(&host->lock, flags); 2712 2713 return 0; 2714 } 2715 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 2716 2717 int sdhci_runtime_resume_host(struct sdhci_host *host) 2718 { 2719 unsigned long flags; 2720 int host_flags = host->flags; 2721 2722 if (host_flags & (SDHCI_USE_SDMA | 
SDHCI_USE_ADMA)) { 2723 if (host->ops->enable_dma) 2724 host->ops->enable_dma(host); 2725 } 2726 2727 sdhci_init(host, 0); 2728 2729 /* Force clock and power re-program */ 2730 host->pwr = 0; 2731 host->clock = 0; 2732 sdhci_start_signal_voltage_switch(host->mmc, &host->mmc->ios); 2733 sdhci_set_ios(host->mmc, &host->mmc->ios); 2734 2735 if ((host_flags & SDHCI_PV_ENABLED) && 2736 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { 2737 spin_lock_irqsave(&host->lock, flags); 2738 sdhci_enable_preset_value(host, true); 2739 spin_unlock_irqrestore(&host->lock, flags); 2740 } 2741 2742 spin_lock_irqsave(&host->lock, flags); 2743 2744 host->runtime_suspended = false; 2745 2746 /* Enable SDIO IRQ */ 2747 if (host->flags & SDHCI_SDIO_IRQ_ENABLED) 2748 sdhci_enable_sdio_irq_nolock(host, true); 2749 2750 /* Enable Card Detection */ 2751 sdhci_enable_card_detection(host); 2752 2753 spin_unlock_irqrestore(&host->lock, flags); 2754 2755 return 0; 2756 } 2757 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 2758 2759 #endif /* CONFIG_PM */ 2760 2761 /*****************************************************************************\ 2762 * * 2763 * Device allocation/registration * 2764 * * 2765 \*****************************************************************************/ 2766 2767 struct sdhci_host *sdhci_alloc_host(struct device *dev, 2768 size_t priv_size) 2769 { 2770 struct mmc_host *mmc; 2771 struct sdhci_host *host; 2772 2773 WARN_ON(dev == NULL); 2774 2775 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 2776 if (!mmc) 2777 return ERR_PTR(-ENOMEM); 2778 2779 host = mmc_priv(mmc); 2780 host->mmc = mmc; 2781 host->mmc_host_ops = sdhci_ops; 2782 mmc->ops = &host->mmc_host_ops; 2783 2784 return host; 2785 } 2786 2787 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 2788 2789 static int sdhci_set_dma_mask(struct sdhci_host *host) 2790 { 2791 struct mmc_host *mmc = host->mmc; 2792 struct device *dev = mmc_dev(mmc); 2793 int ret = -EINVAL; 2794 2795 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 2796 host->flags &= ~SDHCI_USE_64_BIT_DMA; 2797 2798 /* Try 64-bit mask if hardware is capable of it */ 2799 if (host->flags & SDHCI_USE_64_BIT_DMA) { 2800 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 2801 if (ret) { 2802 pr_warn("%s: Failed to set 64-bit DMA mask.\n", 2803 mmc_hostname(mmc)); 2804 host->flags &= ~SDHCI_USE_64_BIT_DMA; 2805 } 2806 } 2807 2808 /* 32-bit mask as default & fallback */ 2809 if (ret) { 2810 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 2811 if (ret) 2812 pr_warn("%s: Failed to set 32-bit DMA mask.\n", 2813 mmc_hostname(mmc)); 2814 } 2815 2816 return ret; 2817 } 2818 2819 int sdhci_add_host(struct sdhci_host *host) 2820 { 2821 struct mmc_host *mmc; 2822 u32 caps[2] = {0, 0}; 2823 u32 max_current_caps; 2824 unsigned int ocr_avail; 2825 unsigned int override_timeout_clk; 2826 u32 max_clk; 2827 int ret; 2828 2829 WARN_ON(host == NULL); 2830 if (host == NULL) 2831 return -EINVAL; 2832 2833 mmc = host->mmc; 2834 2835 if (debug_quirks) 2836 host->quirks = debug_quirks; 2837 if (debug_quirks2) 2838 host->quirks2 = debug_quirks2; 2839 2840 override_timeout_clk = host->timeout_clk; 2841 2842 sdhci_do_reset(host, SDHCI_RESET_ALL); 2843 2844 host->version = sdhci_readw(host, SDHCI_HOST_VERSION); 2845 host->version = (host->version & SDHCI_SPEC_VER_MASK) 2846 >> SDHCI_SPEC_VER_SHIFT; 2847 if (host->version > SDHCI_SPEC_300) { 2848 pr_err("%s: Unknown controller version (%d). 
"You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
		sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->version >= SDHCI_SPEC_300)
		caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
			host->caps1 :
			sdhci_readl(host, SDHCI_CAPABILITIES_1);

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
	    (host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
	    (caps[0] & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
	    (host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA. A driver has the opportunity to change
	 * that during the first call to ->enable_dma(). Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (caps[0] & SDHCI_CAN_64BIT)
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;

	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
		 */
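		/*
		 * For example, with this driver's SDHCI_MAX_SEGS of 128, a
		 * 64-bit capable controller needs (128 * 2 + 1) descriptors
		 * of SDHCI_ADMA2_64_DESC_SZ (12) bytes = 3084 bytes, while
		 * the 32-bit layout needs 257 * SDHCI_ADMA2_32_DESC_SZ (8)
		 * = 2056 bytes. (Illustrative arithmetic, assuming the
		 * constants currently defined in sdhci.h.)
		 */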
2921 */ 2922 if (host->flags & SDHCI_USE_64_BIT_DMA) { 2923 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * 2924 SDHCI_ADMA2_64_DESC_SZ; 2925 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ; 2926 } else { 2927 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * 2928 SDHCI_ADMA2_32_DESC_SZ; 2929 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; 2930 } 2931 2932 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; 2933 buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz + 2934 host->adma_table_sz, &dma, GFP_KERNEL); 2935 if (!buf) { 2936 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 2937 mmc_hostname(mmc)); 2938 host->flags &= ~SDHCI_USE_ADMA; 2939 } else if ((dma + host->align_buffer_sz) & 2940 (SDHCI_ADMA2_DESC_ALIGN - 1)) { 2941 pr_warn("%s: unable to allocate aligned ADMA descriptor\n", 2942 mmc_hostname(mmc)); 2943 host->flags &= ~SDHCI_USE_ADMA; 2944 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 2945 host->adma_table_sz, buf, dma); 2946 } else { 2947 host->align_buffer = buf; 2948 host->align_addr = dma; 2949 2950 host->adma_table = buf + host->align_buffer_sz; 2951 host->adma_addr = dma + host->align_buffer_sz; 2952 } 2953 } 2954 2955 /* 2956 * If we use DMA, then it's up to the caller to set the DMA 2957 * mask, but PIO does not need the hw shim so we set a new 2958 * mask here in that case. 2959 */ 2960 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { 2961 host->dma_mask = DMA_BIT_MASK(64); 2962 mmc_dev(mmc)->dma_mask = &host->dma_mask; 2963 } 2964 2965 if (host->version >= SDHCI_SPEC_300) 2966 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK) 2967 >> SDHCI_CLOCK_BASE_SHIFT; 2968 else 2969 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK) 2970 >> SDHCI_CLOCK_BASE_SHIFT; 2971 2972 host->max_clk *= 1000000; 2973 if (host->max_clk == 0 || host->quirks & 2974 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 2975 if (!host->ops->get_max_clock) { 2976 pr_err("%s: Hardware doesn't specify base clock frequency.\n", 2977 mmc_hostname(mmc)); 2978 ret = -ENODEV; 2979 goto undma; 2980 } 2981 host->max_clk = host->ops->get_max_clock(host); 2982 } 2983 2984 /* 2985 * In case of Host Controller v3.00, find out whether clock 2986 * multiplier is supported. 2987 */ 2988 host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >> 2989 SDHCI_CLOCK_MUL_SHIFT; 2990 2991 /* 2992 * In case the value in Clock Multiplier is 0, then programmable 2993 * clock mode is not supported, otherwise the actual clock 2994 * multiplier is one more than the value of Clock Multiplier 2995 * in the Capabilities Register. 2996 */ 2997 if (host->clk_mul) 2998 host->clk_mul += 1; 2999 3000 /* 3001 * Set host parameters. 
3002 */ 3003 max_clk = host->max_clk; 3004 3005 if (host->ops->get_min_clock) 3006 mmc->f_min = host->ops->get_min_clock(host); 3007 else if (host->version >= SDHCI_SPEC_300) { 3008 if (host->clk_mul) { 3009 mmc->f_min = (host->max_clk * host->clk_mul) / 1024; 3010 max_clk = host->max_clk * host->clk_mul; 3011 } else 3012 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 3013 } else 3014 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 3015 3016 if (!mmc->f_max || mmc->f_max > max_clk) 3017 mmc->f_max = max_clk; 3018 3019 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 3020 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> 3021 SDHCI_TIMEOUT_CLK_SHIFT; 3022 if (host->timeout_clk == 0) { 3023 if (host->ops->get_timeout_clock) { 3024 host->timeout_clk = 3025 host->ops->get_timeout_clock(host); 3026 } else { 3027 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n", 3028 mmc_hostname(mmc)); 3029 ret = -ENODEV; 3030 goto undma; 3031 } 3032 } 3033 3034 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT) 3035 host->timeout_clk *= 1000; 3036 3037 if (override_timeout_clk) 3038 host->timeout_clk = override_timeout_clk; 3039 3040 mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 3041 host->ops->get_max_timeout_count(host) : 1 << 27; 3042 mmc->max_busy_timeout /= host->timeout_clk; 3043 } 3044 3045 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; 3046 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 3047 3048 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 3049 host->flags |= SDHCI_AUTO_CMD12; 3050 3051 /* Auto-CMD23 stuff only works in ADMA or PIO. */ 3052 if ((host->version >= SDHCI_SPEC_300) && 3053 ((host->flags & SDHCI_USE_ADMA) || 3054 !(host->flags & SDHCI_USE_SDMA)) && 3055 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { 3056 host->flags |= SDHCI_AUTO_CMD23; 3057 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc)); 3058 } else { 3059 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc)); 3060 } 3061 3062 /* 3063 * A controller may support 8-bit width, but the board itself 3064 * might not have the pins brought out. Boards that support 3065 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 3066 * their platform code before calling sdhci_add_host(), and we 3067 * won't assume 8-bit width for hosts without that CAP. 
3068 */ 3069 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 3070 mmc->caps |= MMC_CAP_4_BIT_DATA; 3071 3072 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) 3073 mmc->caps &= ~MMC_CAP_CMD23; 3074 3075 if (caps[0] & SDHCI_CAN_DO_HISPD) 3076 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 3077 3078 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3079 !(mmc->caps & MMC_CAP_NONREMOVABLE) && 3080 mmc_gpio_get_cd(host->mmc) < 0) 3081 mmc->caps |= MMC_CAP_NEEDS_POLL; 3082 3083 /* If there are external regulators, get them */ 3084 ret = mmc_regulator_get_supply(mmc); 3085 if (ret == -EPROBE_DEFER) 3086 goto undma; 3087 3088 /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */ 3089 if (!IS_ERR(mmc->supply.vqmmc)) { 3090 ret = regulator_enable(mmc->supply.vqmmc); 3091 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, 3092 1950000)) 3093 caps[1] &= ~(SDHCI_SUPPORT_SDR104 | 3094 SDHCI_SUPPORT_SDR50 | 3095 SDHCI_SUPPORT_DDR50); 3096 if (ret) { 3097 pr_warn("%s: Failed to enable vqmmc regulator: %d\n", 3098 mmc_hostname(mmc), ret); 3099 mmc->supply.vqmmc = ERR_PTR(-EINVAL); 3100 } 3101 } 3102 3103 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) 3104 caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 3105 SDHCI_SUPPORT_DDR50); 3106 3107 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ 3108 if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 3109 SDHCI_SUPPORT_DDR50)) 3110 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; 3111 3112 /* SDR104 supports also implies SDR50 support */ 3113 if (caps[1] & SDHCI_SUPPORT_SDR104) { 3114 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; 3115 /* SD3.0: SDR104 is supported so (for eMMC) the caps2 3116 * field can be promoted to support HS200. 3117 */ 3118 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) 3119 mmc->caps2 |= MMC_CAP2_HS200; 3120 } else if (caps[1] & SDHCI_SUPPORT_SDR50) 3121 mmc->caps |= MMC_CAP_UHS_SDR50; 3122 3123 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 && 3124 (caps[1] & SDHCI_SUPPORT_HS400)) 3125 mmc->caps2 |= MMC_CAP2_HS400; 3126 3127 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) && 3128 (IS_ERR(mmc->supply.vqmmc) || 3129 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000, 3130 1300000))) 3131 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V; 3132 3133 if ((caps[1] & SDHCI_SUPPORT_DDR50) && 3134 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) 3135 mmc->caps |= MMC_CAP_UHS_DDR50; 3136 3137 /* Does the host need tuning for SDR50? */ 3138 if (caps[1] & SDHCI_USE_SDR50_TUNING) 3139 host->flags |= SDHCI_SDR50_NEEDS_TUNING; 3140 3141 /* Driver Type(s) (A, C, D) supported by the host */ 3142 if (caps[1] & SDHCI_DRIVER_TYPE_A) 3143 mmc->caps |= MMC_CAP_DRIVER_TYPE_A; 3144 if (caps[1] & SDHCI_DRIVER_TYPE_C) 3145 mmc->caps |= MMC_CAP_DRIVER_TYPE_C; 3146 if (caps[1] & SDHCI_DRIVER_TYPE_D) 3147 mmc->caps |= MMC_CAP_DRIVER_TYPE_D; 3148 3149 /* Initial value for re-tuning timer count */ 3150 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >> 3151 SDHCI_RETUNING_TIMER_COUNT_SHIFT; 3152 3153 /* 3154 * In case Re-tuning Timer is not disabled, the actual value of 3155 * re-tuning timer will be 2 ^ (n - 1). 
3156 */ 3157 if (host->tuning_count) 3158 host->tuning_count = 1 << (host->tuning_count - 1); 3159 3160 /* Re-tuning mode supported by the Host Controller */ 3161 host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >> 3162 SDHCI_RETUNING_MODE_SHIFT; 3163 3164 ocr_avail = 0; 3165 3166 /* 3167 * According to SD Host Controller spec v3.00, if the Host System 3168 * can afford more than 150mA, Host Driver should set XPC to 1. Also 3169 * the value is meaningful only if Voltage Support in the Capabilities 3170 * register is set. The actual current value is 4 times the register 3171 * value. 3172 */ 3173 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); 3174 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) { 3175 int curr = regulator_get_current_limit(mmc->supply.vmmc); 3176 if (curr > 0) { 3177 3178 /* convert to SDHCI_MAX_CURRENT format */ 3179 curr = curr/1000; /* convert to mA */ 3180 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; 3181 3182 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); 3183 max_current_caps = 3184 (curr << SDHCI_MAX_CURRENT_330_SHIFT) | 3185 (curr << SDHCI_MAX_CURRENT_300_SHIFT) | 3186 (curr << SDHCI_MAX_CURRENT_180_SHIFT); 3187 } 3188 } 3189 3190 if (caps[0] & SDHCI_CAN_VDD_330) { 3191 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; 3192 3193 mmc->max_current_330 = ((max_current_caps & 3194 SDHCI_MAX_CURRENT_330_MASK) >> 3195 SDHCI_MAX_CURRENT_330_SHIFT) * 3196 SDHCI_MAX_CURRENT_MULTIPLIER; 3197 } 3198 if (caps[0] & SDHCI_CAN_VDD_300) { 3199 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; 3200 3201 mmc->max_current_300 = ((max_current_caps & 3202 SDHCI_MAX_CURRENT_300_MASK) >> 3203 SDHCI_MAX_CURRENT_300_SHIFT) * 3204 SDHCI_MAX_CURRENT_MULTIPLIER; 3205 } 3206 if (caps[0] & SDHCI_CAN_VDD_180) { 3207 ocr_avail |= MMC_VDD_165_195; 3208 3209 mmc->max_current_180 = ((max_current_caps & 3210 SDHCI_MAX_CURRENT_180_MASK) >> 3211 SDHCI_MAX_CURRENT_180_SHIFT) * 3212 SDHCI_MAX_CURRENT_MULTIPLIER; 3213 } 3214 3215 /* If OCR set by host, use it instead. */ 3216 if (host->ocr_mask) 3217 ocr_avail = host->ocr_mask; 3218 3219 /* If OCR set by external regulators, give it highest prio. */ 3220 if (mmc->ocr_avail) 3221 ocr_avail = mmc->ocr_avail; 3222 3223 mmc->ocr_avail = ocr_avail; 3224 mmc->ocr_avail_sdio = ocr_avail; 3225 if (host->ocr_avail_sdio) 3226 mmc->ocr_avail_sdio &= host->ocr_avail_sdio; 3227 mmc->ocr_avail_sd = ocr_avail; 3228 if (host->ocr_avail_sd) 3229 mmc->ocr_avail_sd &= host->ocr_avail_sd; 3230 else /* normal SD controllers don't support 1.8V */ 3231 mmc->ocr_avail_sd &= ~MMC_VDD_165_195; 3232 mmc->ocr_avail_mmc = ocr_avail; 3233 if (host->ocr_avail_mmc) 3234 mmc->ocr_avail_mmc &= host->ocr_avail_mmc; 3235 3236 if (mmc->ocr_avail == 0) { 3237 pr_err("%s: Hardware doesn't report any support voltages.\n", 3238 mmc_hostname(mmc)); 3239 ret = -ENODEV; 3240 goto unreg; 3241 } 3242 3243 spin_lock_init(&host->lock); 3244 3245 /* 3246 * Maximum number of segments. Depends on if the hardware 3247 * can do scatter/gather or not. 3248 */ 3249 if (host->flags & SDHCI_USE_ADMA) 3250 mmc->max_segs = SDHCI_MAX_SEGS; 3251 else if (host->flags & SDHCI_USE_SDMA) 3252 mmc->max_segs = 1; 3253 else /* PIO */ 3254 mmc->max_segs = SDHCI_MAX_SEGS; 3255 3256 /* 3257 * Maximum number of sectors in one transfer. Limited by SDMA boundary 3258 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this 3259 * is less anyway. 3260 */ 3261 mmc->max_req_size = 524288; 3262 3263 /* 3264 * Maximum segment size. Could be one segment with the maximum number 3265 * of bytes. 
	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
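	/*
	 * For example, a capabilities field of 2 in SDHCI_MAX_BLOCK_MASK
	 * decodes to 512 << 2 = 2048-byte blocks, which is also what the
	 * SDHCI_QUIRK_FORCE_BLK_SZ_2048 path hard-codes; the reserved field
	 * value 3 falls back to a conservative 512 bytes.
	 */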
"DMA" : "PIO"); 3342 3343 sdhci_enable_card_detection(host); 3344 3345 return 0; 3346 3347 unled: 3348 sdhci_led_unregister(host); 3349 unirq: 3350 sdhci_do_reset(host, SDHCI_RESET_ALL); 3351 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3352 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3353 free_irq(host->irq, host); 3354 untasklet: 3355 tasklet_kill(&host->finish_tasklet); 3356 unreg: 3357 if (!IS_ERR(mmc->supply.vqmmc)) 3358 regulator_disable(mmc->supply.vqmmc); 3359 undma: 3360 if (host->align_buffer) 3361 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 3362 host->adma_table_sz, host->align_buffer, 3363 host->align_addr); 3364 host->adma_table = NULL; 3365 host->align_buffer = NULL; 3366 3367 return ret; 3368 } 3369 3370 EXPORT_SYMBOL_GPL(sdhci_add_host); 3371 3372 void sdhci_remove_host(struct sdhci_host *host, int dead) 3373 { 3374 struct mmc_host *mmc = host->mmc; 3375 unsigned long flags; 3376 3377 if (dead) { 3378 spin_lock_irqsave(&host->lock, flags); 3379 3380 host->flags |= SDHCI_DEVICE_DEAD; 3381 3382 if (host->mrq) { 3383 pr_err("%s: Controller removed during " 3384 " transfer!\n", mmc_hostname(mmc)); 3385 3386 host->mrq->cmd->error = -ENOMEDIUM; 3387 tasklet_schedule(&host->finish_tasklet); 3388 } 3389 3390 spin_unlock_irqrestore(&host->lock, flags); 3391 } 3392 3393 sdhci_disable_card_detection(host); 3394 3395 mmc_remove_host(mmc); 3396 3397 sdhci_led_unregister(host); 3398 3399 if (!dead) 3400 sdhci_do_reset(host, SDHCI_RESET_ALL); 3401 3402 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3403 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3404 free_irq(host->irq, host); 3405 3406 del_timer_sync(&host->timer); 3407 3408 tasklet_kill(&host->finish_tasklet); 3409 3410 if (!IS_ERR(mmc->supply.vqmmc)) 3411 regulator_disable(mmc->supply.vqmmc); 3412 3413 if (host->align_buffer) 3414 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 3415 host->adma_table_sz, host->align_buffer, 3416 host->align_addr); 3417 3418 host->adma_table = NULL; 3419 host->align_buffer = NULL; 3420 } 3421 3422 EXPORT_SYMBOL_GPL(sdhci_remove_host); 3423 3424 void sdhci_free_host(struct sdhci_host *host) 3425 { 3426 mmc_free_host(host->mmc); 3427 } 3428 3429 EXPORT_SYMBOL_GPL(sdhci_free_host); 3430 3431 /*****************************************************************************\ 3432 * * 3433 * Driver init/exit * 3434 * * 3435 \*****************************************************************************/ 3436 3437 static int __init sdhci_drv_init(void) 3438 { 3439 pr_info(DRIVER_NAME 3440 ": Secure Digital Host Controller Interface driver\n"); 3441 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 3442 3443 return 0; 3444 } 3445 3446 static void __exit sdhci_drv_exit(void) 3447 { 3448 } 3449 3450 module_init(sdhci_drv_init); 3451 module_exit(sdhci_drv_exit); 3452 3453 module_param(debug_quirks, uint, 0444); 3454 module_param(debug_quirks2, uint, 0444); 3455 3456 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 3457 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 3458 MODULE_LICENSE("GPL"); 3459 3460 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 3461 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 3462