/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
        pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)

#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
        defined(CONFIG_MMC_SDHCI_MODULE))
#define SDHCI_USE_LEDS_CLASS
#endif

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_do_get_cd(struct sdhci_host *host);

#ifdef CONFIG_PM
static int sdhci_runtime_pm_get(struct sdhci_host *host);
static int sdhci_runtime_pm_put(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
#else
static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
{
        return 0;
}
static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
{
        return 0;
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
}
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
}
#endif

static void sdhci_dumpregs(struct sdhci_host *host)
{
        pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
                 mmc_hostname(host->mmc));

        pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
                 sdhci_readl(host, SDHCI_DMA_ADDRESS),
                 sdhci_readw(host, SDHCI_HOST_VERSION));
        pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
                 sdhci_readw(host, SDHCI_BLOCK_SIZE),
                 sdhci_readw(host, SDHCI_BLOCK_COUNT));
        pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
                 sdhci_readl(host, SDHCI_ARGUMENT),
                 sdhci_readw(host, SDHCI_TRANSFER_MODE));
        pr_debug(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
                 sdhci_readl(host, SDHCI_PRESENT_STATE),
                 sdhci_readb(host, SDHCI_HOST_CONTROL));
        pr_debug(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
                 sdhci_readb(host, SDHCI_POWER_CONTROL),
                 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
        pr_debug(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
                 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
                 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
        pr_debug(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
                 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
                 sdhci_readl(host, SDHCI_INT_STATUS));
        pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
                 sdhci_readl(host, SDHCI_INT_ENABLE),
                 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
        pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
                 sdhci_readw(host, SDHCI_ACMD12_ERR),
                 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
        pr_debug(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
                 sdhci_readl(host, SDHCI_CAPABILITIES),
                 sdhci_readl(host, SDHCI_CAPABILITIES_1));
        pr_debug(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
                 sdhci_readw(host, SDHCI_COMMAND),
                 sdhci_readl(host, SDHCI_MAX_CURRENT));
        pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
                 sdhci_readw(host, SDHCI_HOST_CONTROL2));

        if (host->flags & SDHCI_USE_ADMA) {
                if (host->flags & SDHCI_USE_64_BIT_DMA)
                        pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
                                 readl(host->ioaddr + SDHCI_ADMA_ERROR),
                                 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
                                 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
                else
                        pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
                                 readl(host->ioaddr + SDHCI_ADMA_ERROR),
                                 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
        }

        pr_debug(DRIVER_NAME ": ===========================================\n");
}

/*****************************************************************************\
 *                                                                           *
 *                           Low level functions                             *
 *                                                                           *
\*****************************************************************************/
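
/*
 * Card-detect interrupts are armed for the transition we expect next: if a
 * card is currently present, watch for removal; if absent, watch for
 * insertion. Only one of the two events is enabled at any time, so exactly
 * one interrupt fires per insertion or removal.
 */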
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
        u32 present;

        if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
            (host->mmc->caps & MMC_CAP_NONREMOVABLE))
                return;

        if (enable) {
                present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
                          SDHCI_CARD_PRESENT;

                host->ier |= present ? SDHCI_INT_CARD_REMOVE :
                                       SDHCI_INT_CARD_INSERT;
        } else {
                host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
        }

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
        sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
        sdhci_set_card_detection(host, false);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
        unsigned long timeout;

        sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

        if (mask & SDHCI_RESET_ALL) {
                host->clock = 0;
                /* Reset-all turns off SD Bus Power */
                if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
                        sdhci_runtime_pm_bus_off(host);
        }

        /* Wait max 100 ms */
        timeout = 100;

        /* hw clears the bit when it's done */
        while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
                if (timeout == 0) {
                        pr_err("%s: Reset 0x%x never completed.\n",
                               mmc_hostname(host->mmc), (int)mask);
                        sdhci_dumpregs(host);
                        return;
                }
                timeout--;
                mdelay(1);
        }
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
        if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
                if (!sdhci_do_get_cd(host))
                        return;
        }

        host->ops->reset(host, mask);

        if (mask & SDHCI_RESET_ALL) {
                if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                        if (host->ops->enable_dma)
                                host->ops->enable_dma(host);
                }

                /* Resetting the controller clears many settings */
                host->preset_enabled = false;
        }
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);

static void sdhci_init(struct sdhci_host *host, int soft)
{
        if (soft)
                sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
        else
                sdhci_do_reset(host, SDHCI_RESET_ALL);

        host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
                    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
                    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
                    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
                    SDHCI_INT_RESPONSE;

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

        if (soft) {
                /* force clock reconfiguration */
                host->clock = 0;
                sdhci_set_ios(host->mmc, &host->mmc->ios);
        }
}

static void sdhci_reinit(struct sdhci_host *host)
{
        sdhci_init(host, 0);
        sdhci_enable_card_detection(host);
}

static void sdhci_activate_led(struct sdhci_host *host)
{
        u8 ctrl;

        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
        ctrl |= SDHCI_CTRL_LED;
        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_deactivate_led(struct sdhci_host *host)
{
        u8 ctrl;

        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
        ctrl &= ~SDHCI_CTRL_LED;
        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#ifdef SDHCI_USE_LEDS_CLASS
static void sdhci_led_control(struct led_classdev *led,
                              enum led_brightness brightness)
{
        struct sdhci_host *host = container_of(led, struct sdhci_host, led);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        if (host->runtime_suspended)
                goto out;

        if (brightness == LED_OFF)
                sdhci_deactivate_led(host);
        else
                sdhci_activate_led(host);
out:
        spin_unlock_irqrestore(&host->lock, flags);
}
#endif

/*****************************************************************************\
 *                                                                           *
 *                              Core functions                               *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
        unsigned long flags;
        size_t blksize, len, chunk;
        u32 uninitialized_var(scratch);
        u8 *buf;

        DBG("PIO reading\n");

        blksize = host->data->blksz;
        chunk = 0;

        local_irq_save(flags);

        while (blksize) {
                BUG_ON(!sg_miter_next(&host->sg_miter));

                len = min(host->sg_miter.length, blksize);

                blksize -= len;
                host->sg_miter.consumed = len;

                buf = host->sg_miter.addr;

                while (len) {
                        if (chunk == 0) {
                                scratch = sdhci_readl(host, SDHCI_BUFFER);
                                chunk = 4;
                        }

                        *buf = scratch & 0xFF;

                        buf++;
                        scratch >>= 8;
                        chunk--;
                        len--;
                }
        }

        sg_miter_stop(&host->sg_miter);

        local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
        unsigned long flags;
        size_t blksize, len, chunk;
        u32 scratch;
        u8 *buf;

        DBG("PIO writing\n");

        blksize = host->data->blksz;
        chunk = 0;
        scratch = 0;

        local_irq_save(flags);

        while (blksize) {
                BUG_ON(!sg_miter_next(&host->sg_miter));

                len = min(host->sg_miter.length, blksize);

                blksize -= len;
                host->sg_miter.consumed = len;

                buf = host->sg_miter.addr;

                while (len) {
                        scratch |= (u32)*buf << (chunk * 8);

                        buf++;
                        chunk++;
                        len--;

                        if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
                                sdhci_writel(host, scratch, SDHCI_BUFFER);
                                chunk = 0;
                                scratch = 0;
                        }
                }
        }

        sg_miter_stop(&host->sg_miter);

        local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
        u32 mask;

        BUG_ON(!host->data);

        if (host->blocks == 0)
                return;

        if (host->data->flags & MMC_DATA_READ)
                mask = SDHCI_DATA_AVAILABLE;
        else
                mask = SDHCI_SPACE_AVAILABLE;

        /*
         * Some controllers (JMicron JMB38x) mess up the buffer bits
         * for transfers < 4 bytes. As long as it is just one block,
         * we can ignore the bits.
         */
        if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
            (host->data->blocks == 1))
                mask = ~0;

        while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
                if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
                        udelay(100);

                if (host->data->flags & MMC_DATA_READ)
                        sdhci_read_block_pio(host);
                else
                        sdhci_write_block_pio(host);

                host->blocks--;
                if (host->blocks == 0)
                        break;
        }

        DBG("PIO transfer complete.\n");
}
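
/*
 * Map the request's scatterlist for DMA, or reuse a mapping set up earlier
 * by sdhci_pre_req(). The host_cookie records who created the mapping
 * (COOKIE_PRE_MAPPED vs COOKIE_MAPPED), which is how sdhci_post_req() and
 * the finish tasklet later decide whether they are responsible for
 * unmapping it.
 */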
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
                                  struct mmc_data *data, int cookie)
{
        int sg_count;

        /*
         * If the data buffers are already mapped, return the previous
         * dma_map_sg() result.
         */
        if (data->host_cookie == COOKIE_PRE_MAPPED)
                return data->sg_count;

        sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
                              data->flags & MMC_DATA_WRITE ?
                              DMA_TO_DEVICE : DMA_FROM_DEVICE);

        if (sg_count == 0)
                return -ENOSPC;

        data->sg_count = sg_count;
        data->host_cookie = cookie;

        return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
        local_irq_save(*flags);
        return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
        kunmap_atomic(buffer);
        local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
                                  dma_addr_t addr, int len, unsigned cmd)
{
        struct sdhci_adma2_64_desc *dma_desc = desc;

        /* 32-bit and 64-bit descriptors have these members in same position */
        dma_desc->cmd = cpu_to_le16(cmd);
        dma_desc->len = cpu_to_le16(len);
        dma_desc->addr_lo = cpu_to_le32((u32)addr);

        if (host->flags & SDHCI_USE_64_BIT_DMA)
                dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
        struct sdhci_adma2_64_desc *dma_desc = desc;

        /* 32-bit and 64-bit descriptors have 'cmd' in same position */
        dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
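
/*
 * Illustrative descriptor chain for a two-entry scatterlist whose first
 * buffer starts 2 bytes past a 4-byte boundary (values are examples only):
 *
 *   desc[0]: TRAN|VALID    addr = align_buf   len = 2            (bounce)
 *   desc[1]: TRAN|VALID    addr = sg0 + 2     len = sg0_len - 2
 *   desc[2]: TRAN|VALID    addr = sg1         len = sg1_len
 *   desc[3]: NOP|END|VALID addr = 0           len = 0            (terminator)
 *
 * Controllers with SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC get the END attribute
 * set on the last transfer descriptor instead of a trailing NOP.
 */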
static void sdhci_adma_table_pre(struct sdhci_host *host,
                                 struct mmc_data *data, int sg_count)
{
        struct scatterlist *sg;
        unsigned long flags;
        dma_addr_t addr, align_addr;
        void *desc, *align;
        char *buffer;
        int len, offset, i;

        /*
         * The spec does not specify endianness of descriptor table.
         * We currently guess that it is LE.
         */

        host->sg_count = sg_count;

        desc = host->adma_table;
        align = host->align_buffer;

        align_addr = host->align_addr;

        for_each_sg(data->sg, sg, host->sg_count, i) {
                addr = sg_dma_address(sg);
                len = sg_dma_len(sg);

                /*
                 * The SDHCI specification states that ADMA addresses must
                 * be 32-bit aligned. If they aren't, then we use a bounce
                 * buffer for the (up to three) bytes that screw up the
                 * alignment.
                 */
                offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
                         SDHCI_ADMA2_MASK;
                if (offset) {
                        if (data->flags & MMC_DATA_WRITE) {
                                buffer = sdhci_kmap_atomic(sg, &flags);
                                memcpy(align, buffer, offset);
                                sdhci_kunmap_atomic(buffer, &flags);
                        }

                        /* tran, valid */
                        sdhci_adma_write_desc(host, desc, align_addr, offset,
                                              ADMA2_TRAN_VALID);

                        BUG_ON(offset > 65536);

                        align += SDHCI_ADMA2_ALIGN;
                        align_addr += SDHCI_ADMA2_ALIGN;

                        desc += host->desc_sz;

                        addr += offset;
                        len -= offset;
                }

                BUG_ON(len > 65536);

                if (len) {
                        /* tran, valid */
                        sdhci_adma_write_desc(host, desc, addr, len,
                                              ADMA2_TRAN_VALID);
                        desc += host->desc_sz;
                }

                /*
                 * If this triggers then we have a calculation bug
                 * somewhere. :/
                 */
                WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
        }

        if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
                /* Mark the last descriptor as the terminating descriptor */
                if (desc != host->adma_table) {
                        desc -= host->desc_sz;
                        sdhci_adma_mark_end(desc);
                }
        } else {
                /* Add a terminating entry - nop, end, valid */
                sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
        }
}

static void sdhci_adma_table_post(struct sdhci_host *host,
                                  struct mmc_data *data)
{
        struct scatterlist *sg;
        int i, size;
        void *align;
        char *buffer;
        unsigned long flags;

        if (data->flags & MMC_DATA_READ) {
                bool has_unaligned = false;

                /* Do a quick scan of the SG list for any unaligned mappings */
                for_each_sg(data->sg, sg, host->sg_count, i)
                        if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
                                has_unaligned = true;
                                break;
                        }

                if (has_unaligned) {
                        dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
                                            data->sg_len, DMA_FROM_DEVICE);

                        align = host->align_buffer;

                        for_each_sg(data->sg, sg, host->sg_count, i) {
                                if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
                                        size = SDHCI_ADMA2_ALIGN -
                                               (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

                                        buffer = sdhci_kmap_atomic(sg, &flags);
                                        memcpy(buffer, align, size);
                                        sdhci_kunmap_atomic(buffer, &flags);

                                        align += SDHCI_ADMA2_ALIGN;
                                }
                        }
                }
        }
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
        u8 count;
        struct mmc_data *data = cmd->data;
        unsigned target_timeout, current_timeout;

        /*
         * If the host controller provides us with an incorrect timeout
         * value, just skip the check and use 0xE. The hardware may take
         * longer to time out, but that's much better than having a too-short
         * timeout value.
         */
        if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
                return 0xE;

        /* Unspecified timeout, assume max */
        if (!data && !cmd->busy_timeout)
                return 0xE;

        /* timeout in us */
        if (!data)
                target_timeout = cmd->busy_timeout * 1000;
        else {
                target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
                if (host->clock && data->timeout_clks) {
                        unsigned long long val;

                        /*
                         * data->timeout_clks is in units of clock cycles.
                         * host->clock is in Hz. target_timeout is in us.
                         * Hence, us = 1000000 * cycles / Hz. Round up.
                         */
                        val = 1000000 * data->timeout_clks;
                        if (do_div(val, host->clock))
                                target_timeout++;
                        target_timeout += val;
                }
        }

        /*
         * Figure out needed cycles.
         * We do this in steps in order to fit inside a 32 bit int.
         * The first step is the minimum timeout, which will have a
         * minimum resolution of 6 bits:
         * (1) 2^13*1000 > 2^22,
         * (2) host->timeout_clk < 2^16
         *     =>
         *     (1) / (2) > 2^6
         */
        count = 0;
        current_timeout = (1 << 13) * 1000 / host->timeout_clk;
        while (current_timeout < target_timeout) {
                count++;
                current_timeout <<= 1;
                if (count >= 0xF)
                        break;
        }

        if (count >= 0xF) {
                DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
                    mmc_hostname(host->mmc), count, cmd->opcode);
                count = 0xE;
        }

        return count;
}
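
/*
 * Worked example (illustrative): with timeout_clk = 50 kHz the minimum
 * timeout above is (1 << 13) * 1000 / 50 = 163840 us. A request asking for
 * a 1 s (1000000 us) data timeout then needs three doublings:
 * 163840 -> 327680 -> 655360 -> 1310720 us, so count = 3 is returned and
 * the controller waits 2^(13 + 3) timeout clocks before signalling a data
 * timeout.
 */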
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
        u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
        u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

        if (host->flags & SDHCI_REQ_USE_DMA)
                host->ier = (host->ier & ~pio_irqs) | dma_irqs;
        else
                host->ier = (host->ier & ~dma_irqs) | pio_irqs;

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
        u8 count;

        if (host->ops->set_timeout) {
                host->ops->set_timeout(host, cmd);
        } else {
                count = sdhci_calc_timeout(host, cmd);
                sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
        }
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
        u8 ctrl;
        struct mmc_data *data = cmd->data;

        WARN_ON(host->data);

        if (data || (cmd->flags & MMC_RSP_BUSY))
                sdhci_set_timeout(host, cmd);

        if (!data)
                return;

        /* Sanity checks */
        BUG_ON(data->blksz * data->blocks > 524288);
        BUG_ON(data->blksz > host->mmc->max_blk_size);
        BUG_ON(data->blocks > 65535);

        host->data = data;
        host->data_early = 0;
        host->data->bytes_xfered = 0;

        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                struct scatterlist *sg;
                unsigned int length_mask, offset_mask;
                int i;

                host->flags |= SDHCI_REQ_USE_DMA;

                /*
                 * FIXME: This doesn't account for merging when mapping the
                 * scatterlist.
                 *
                 * The assumption here being that alignment and lengths are
                 * the same after DMA mapping to device address space.
                 */
                length_mask = 0;
                offset_mask = 0;
                if (host->flags & SDHCI_USE_ADMA) {
                        if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
                                length_mask = 3;
                                /*
                                 * As we use up to 3 byte chunks to work
                                 * around alignment problems, we need to
                                 * check the offset as well.
                                 */
                                offset_mask = 3;
                        }
                } else {
                        if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
                                length_mask = 3;
                        if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
                                offset_mask = 3;
                }

                if (unlikely(length_mask | offset_mask)) {
                        for_each_sg(data->sg, sg, data->sg_len, i) {
                                if (sg->length & length_mask) {
                                        DBG("Reverting to PIO because of transfer size (%d)\n",
                                            sg->length);
                                        host->flags &= ~SDHCI_REQ_USE_DMA;
                                        break;
                                }
                                if (sg->offset & offset_mask) {
                                        DBG("Reverting to PIO because of bad alignment\n");
                                        host->flags &= ~SDHCI_REQ_USE_DMA;
                                        break;
                                }
                        }
                }
        }

        if (host->flags & SDHCI_REQ_USE_DMA) {
                int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

                if (sg_cnt <= 0) {
                        /*
                         * This only happens when someone fed
                         * us an invalid request.
                         */
                        WARN_ON(1);
                        host->flags &= ~SDHCI_REQ_USE_DMA;
                } else if (host->flags & SDHCI_USE_ADMA) {
                        sdhci_adma_table_pre(host, data, sg_cnt);

                        sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
                        if (host->flags & SDHCI_USE_64_BIT_DMA)
                                sdhci_writel(host,
                                             (u64)host->adma_addr >> 32,
                                             SDHCI_ADMA_ADDRESS_HI);
                } else {
                        WARN_ON(sg_cnt != 1);
                        sdhci_writel(host, sg_dma_address(data->sg),
                                     SDHCI_DMA_ADDRESS);
                }
        }

        /*
         * Always adjust the DMA selection as some controllers
         * (e.g. JMicron) can't do PIO properly when the selection
         * is ADMA.
         */
        if (host->version >= SDHCI_SPEC_200) {
                ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
                ctrl &= ~SDHCI_CTRL_DMA_MASK;
                if ((host->flags & SDHCI_REQ_USE_DMA) &&
                    (host->flags & SDHCI_USE_ADMA)) {
                        if (host->flags & SDHCI_USE_64_BIT_DMA)
                                ctrl |= SDHCI_CTRL_ADMA64;
                        else
                                ctrl |= SDHCI_CTRL_ADMA32;
                } else {
                        ctrl |= SDHCI_CTRL_SDMA;
                }
                sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
        }

        if (!(host->flags & SDHCI_REQ_USE_DMA)) {
                int flags;

                flags = SG_MITER_ATOMIC;
                if (host->data->flags & MMC_DATA_READ)
                        flags |= SG_MITER_TO_SG;
                else
                        flags |= SG_MITER_FROM_SG;
                sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
                host->blocks = data->blocks;
        }

        sdhci_set_transfer_irqs(host);

        /* Set the DMA boundary value and block size */
        sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
                                            data->blksz), SDHCI_BLOCK_SIZE);
        sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
                                    struct mmc_command *cmd)
{
        u16 mode = 0;
        struct mmc_data *data = cmd->data;

        if (data == NULL) {
                if (host->quirks2 &
                    SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
                        sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
                } else {
                        /* clear Auto CMD settings for no data CMDs */
                        mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
                        sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
                                     SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
                }
                return;
        }

        WARN_ON(!host->data);

        if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
                mode = SDHCI_TRNS_BLK_CNT_EN;

        if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
                mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
                /*
                 * If we are sending CMD23, CMD12 never gets sent
                 * on successful completion (so no Auto-CMD12).
                 */
                if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
                    (cmd->opcode != SD_IO_RW_EXTENDED))
                        mode |= SDHCI_TRNS_AUTO_CMD12;
                else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
                        mode |= SDHCI_TRNS_AUTO_CMD23;
                        sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
                }
        }

        if (data->flags & MMC_DATA_READ)
                mode |= SDHCI_TRNS_READ;
        if (host->flags & SDHCI_REQ_USE_DMA)
                mode |= SDHCI_TRNS_DMA;

        sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
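
/*
 * Example (illustrative): a multi-block DMA read issued under CMD23, on a
 * host flagged with SDHCI_AUTO_CMD23, ends up with
 *
 *   mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI |
 *          SDHCI_TRNS_AUTO_CMD23 | SDHCI_TRNS_READ | SDHCI_TRNS_DMA;
 *
 * and the CMD23 argument (the block count) is written to SDHCI_ARGUMENT2.
 */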
static void sdhci_finish_data(struct sdhci_host *host)
{
        struct mmc_data *data;

        BUG_ON(!host->data);

        data = host->data;
        host->data = NULL;

        if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
            (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
                sdhci_adma_table_post(host, data);

        /*
         * The specification states that the block count register must
         * be updated, but it does not specify at what point in the
         * data flow. That makes the register entirely useless to read
         * back so we have to assume that nothing made it to the card
         * in the event of an error.
         */
        if (data->error)
                data->bytes_xfered = 0;
        else
                data->bytes_xfered = data->blksz * data->blocks;

        /*
         * Need to send CMD12 if -
         * a) open-ended multiblock transfer (no CMD23)
         * b) error in multiblock transfer
         */
        if (data->stop &&
            (data->error ||
             !host->mrq->sbc)) {

                /*
                 * The controller needs a reset of internal state machines
                 * upon error conditions.
                 */
                if (data->error) {
                        sdhci_do_reset(host, SDHCI_RESET_CMD);
                        sdhci_do_reset(host, SDHCI_RESET_DATA);
                }

                sdhci_send_command(host, data->stop);
        } else
                tasklet_schedule(&host->finish_tasklet);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
        int flags;
        u32 mask;
        unsigned long timeout;

        WARN_ON(host->cmd);

        /* Initially, a command has no error */
        cmd->error = 0;

        /* Wait max 10 ms */
        timeout = 10;

        mask = SDHCI_CMD_INHIBIT;
        if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
                mask |= SDHCI_DATA_INHIBIT;

        /*
         * We shouldn't wait for data inhibit for stop commands, even
         * though they might use busy signaling.
         */
        if (host->mrq->data && (cmd == host->mrq->data->stop))
                mask &= ~SDHCI_DATA_INHIBIT;

        while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
                if (timeout == 0) {
                        pr_err("%s: Controller never released inhibit bit(s).\n",
                               mmc_hostname(host->mmc));
                        sdhci_dumpregs(host);
                        cmd->error = -EIO;
                        tasklet_schedule(&host->finish_tasklet);
                        return;
                }
                timeout--;
                mdelay(1);
        }

        timeout = jiffies;
        if (!cmd->data && cmd->busy_timeout > 9000)
                timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
        else
                timeout += 10 * HZ;
        mod_timer(&host->timer, timeout);

        host->cmd = cmd;
        host->busy_handle = 0;

        sdhci_prepare_data(host, cmd);

        sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

        sdhci_set_transfer_mode(host, cmd);

        if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
                pr_err("%s: Unsupported response type!\n",
                       mmc_hostname(host->mmc));
                cmd->error = -EINVAL;
                tasklet_schedule(&host->finish_tasklet);
                return;
        }

        if (!(cmd->flags & MMC_RSP_PRESENT))
                flags = SDHCI_CMD_RESP_NONE;
        else if (cmd->flags & MMC_RSP_136)
                flags = SDHCI_CMD_RESP_LONG;
        else if (cmd->flags & MMC_RSP_BUSY)
                flags = SDHCI_CMD_RESP_SHORT_BUSY;
        else
                flags = SDHCI_CMD_RESP_SHORT;

        if (cmd->flags & MMC_RSP_CRC)
                flags |= SDHCI_CMD_CRC;
        if (cmd->flags & MMC_RSP_OPCODE)
                flags |= SDHCI_CMD_INDEX;

        /* CMD19 is special in that the Data Present Select should be set */
        if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
            cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
                flags |= SDHCI_CMD_DATA;

        sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_finish_command(struct sdhci_host *host)
{
        int i;

        BUG_ON(host->cmd == NULL);

        if (host->cmd->flags & MMC_RSP_PRESENT) {
                if (host->cmd->flags & MMC_RSP_136) {
                        /* CRC is stripped so we need to do some shifting. */
                        for (i = 0; i < 4; i++) {
                                host->cmd->resp[i] = sdhci_readl(host,
                                        SDHCI_RESPONSE + (3-i)*4) << 8;
                                if (i != 3)
                                        host->cmd->resp[i] |=
                                                sdhci_readb(host,
                                                SDHCI_RESPONSE + (3-i)*4-1);
                        }
                } else {
                        host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
                }
        }

        /* Finished CMD23, now send actual command. */
        if (host->cmd == host->mrq->sbc) {
                host->cmd = NULL;
                sdhci_send_command(host, host->mrq->cmd);
        } else {

                /* Processed actual command. */
                if (host->data && host->data_early)
                        sdhci_finish_data(host);

                if (!host->cmd->data)
                        tasklet_schedule(&host->finish_tasklet);

                host->cmd = NULL;
        }
}
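
/*
 * Illustration of the 136-bit response handling above: the controller
 * drops the CRC byte, so the stored response bits sit one byte "low" in
 * the four response registers. Each resp[i] is rebuilt by shifting the
 * matching 32-bit register left by 8 and refilling the low byte from the
 * top byte of the register below it; resp[3] keeps its low byte zero.
 */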
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
        u16 preset = 0;

        switch (host->timing) {
        case MMC_TIMING_UHS_SDR12:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
                break;
        case MMC_TIMING_UHS_SDR25:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
                break;
        case MMC_TIMING_UHS_SDR50:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
                break;
        case MMC_TIMING_UHS_SDR104:
        case MMC_TIMING_MMC_HS200:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
                break;
        case MMC_TIMING_UHS_DDR50:
        case MMC_TIMING_MMC_DDR52:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
                break;
        case MMC_TIMING_MMC_HS400:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
                break;
        default:
                pr_warn("%s: Invalid UHS-I mode selected\n",
                        mmc_hostname(host->mmc));
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
                break;
        }
        return preset;
}

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
        int div = 0; /* Initialized for compiler warning */
        int real_div = div, clk_mul = 1;
        u16 clk = 0;
        unsigned long timeout;
        bool switch_base_clk = false;

        host->mmc->actual_clock = 0;

        sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
        if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
                mdelay(1);

        if (clock == 0)
                return;

        if (host->version >= SDHCI_SPEC_300) {
                if (host->preset_enabled) {
                        u16 pre_val;

                        clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
                        pre_val = sdhci_get_preset_value(host);
                        div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
                                >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
                        if (host->clk_mul &&
                            (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
                                clk = SDHCI_PROG_CLOCK_MODE;
                                real_div = div + 1;
                                clk_mul = host->clk_mul;
                        } else {
                                real_div = max_t(int, 1, div << 1);
                        }
                        goto clock_set;
                }

                /*
                 * Check if the Host Controller supports Programmable Clock
                 * Mode.
                 */
                if (host->clk_mul) {
                        for (div = 1; div <= 1024; div++) {
                                if ((host->max_clk * host->clk_mul / div)
                                        <= clock)
                                        break;
                        }
                        if ((host->max_clk * host->clk_mul / div) <= clock) {
                                /*
                                 * Set Programmable Clock Mode in the Clock
                                 * Control register.
                                 */
                                clk = SDHCI_PROG_CLOCK_MODE;
                                real_div = div;
                                clk_mul = host->clk_mul;
                                div--;
                        } else {
                                /*
                                 * Divisor can be too small to reach clock
                                 * speed requirement. Then use the base clock.
                                 */
                                switch_base_clk = true;
                        }
                }

                if (!host->clk_mul || switch_base_clk) {
                        /* Version 3.00 divisors must be a multiple of 2. */
                        if (host->max_clk <= clock)
                                div = 1;
                        else {
                                for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
                                     div += 2) {
                                        if ((host->max_clk / div) <= clock)
                                                break;
                                }
                        }
                        real_div = div;
                        div >>= 1;
                        if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
                            && !div && host->max_clk <= 25000000)
                                div = 1;
                }
        } else {
                /* Version 2.00 divisors must be a power of 2. */
                for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
                        if ((host->max_clk / div) <= clock)
                                break;
                }
                real_div = div;
                div >>= 1;
        }

clock_set:
        if (real_div)
                host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
        clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
        clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
                << SDHCI_DIVIDER_HI_SHIFT;
        clk |= SDHCI_CLOCK_INT_EN;
        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

        /* Wait max 20 ms */
        timeout = 20;
        while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
                 & SDHCI_CLOCK_INT_STABLE)) {
                if (timeout == 0) {
                        pr_err("%s: Internal clock never stabilised.\n",
                               mmc_hostname(host->mmc));
                        sdhci_dumpregs(host);
                        return;
                }
                timeout--;
                mdelay(1);
        }

        clk |= SDHCI_CLOCK_CARD_EN;
        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
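
/*
 * Worked example (illustrative) for the SDHCI 3.00 divided-clock path in
 * sdhci_set_clock(): with max_clk = 200 MHz and a 50 MHz request, the
 * search stops at div = 4 (200 / 4 = 50 <= 50), actual_clock becomes
 * 50 MHz, and div >> 1 = 2 is programmed into the 10-bit divider field of
 * SDHCI_CLOCK_CONTROL.
 */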
static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
                                unsigned short vdd)
{
        struct mmc_host *mmc = host->mmc;

        spin_unlock_irq(&host->lock);
        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
        spin_lock_irq(&host->lock);

        if (mode != MMC_POWER_OFF)
                sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
        else
                sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
                     unsigned short vdd)
{
        u8 pwr = 0;

        if (mode != MMC_POWER_OFF) {
                switch (1 << vdd) {
                case MMC_VDD_165_195:
                        pwr = SDHCI_POWER_180;
                        break;
                case MMC_VDD_29_30:
                case MMC_VDD_30_31:
                        pwr = SDHCI_POWER_300;
                        break;
                case MMC_VDD_32_33:
                case MMC_VDD_33_34:
                        pwr = SDHCI_POWER_330;
                        break;
                default:
                        WARN(1, "%s: Invalid vdd %#x\n",
                             mmc_hostname(host->mmc), vdd);
                        break;
                }
        }

        if (host->pwr == pwr)
                return;

        host->pwr = pwr;

        if (pwr == 0) {
                sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
                if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
                        sdhci_runtime_pm_bus_off(host);
        } else {
                /*
                 * Spec says that we should clear the power reg before setting
                 * a new value. Some controllers don't seem to like this though.
                 */
                if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
                        sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

                /*
                 * At least the Marvell CaFe chip gets confused if we set the
                 * voltage and turn on power at the same time, so set the
                 * voltage first.
                 */
                if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
                        sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

                pwr |= SDHCI_POWER_ON;

                sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

                if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
                        sdhci_runtime_pm_bus_on(host);

                /*
                 * Some controllers need an extra 10 ms delay before they
                 * can apply clock after applying power.
                 */
                if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
                        mdelay(10);
        }
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
                              unsigned short vdd)
{
        struct mmc_host *mmc = host->mmc;

        if (host->ops->set_power)
                host->ops->set_power(host, mode, vdd);
        else if (!IS_ERR(mmc->supply.vmmc))
                sdhci_set_power_reg(host, mode, vdd);
        else
                sdhci_set_power(host, mode, vdd);
}

/*****************************************************************************\
 *                                                                           *
 *                              MMC callbacks                                *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct sdhci_host *host;
        int present;
        unsigned long flags;

        host = mmc_priv(mmc);

        sdhci_runtime_pm_get(host);

        /* Firstly check card presence */
        present = mmc->ops->get_cd(mmc);

        spin_lock_irqsave(&host->lock, flags);

        WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
        sdhci_activate_led(host);
#endif

        /*
         * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
         * requests if Auto-CMD12 is enabled.
         */
        if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
                if (mrq->stop) {
                        mrq->data->stop = NULL;
                        mrq->stop = NULL;
                }
        }

        host->mrq = mrq;

        if (!present || host->flags & SDHCI_DEVICE_DEAD) {
                host->mrq->cmd->error = -ENOMEDIUM;
                tasklet_schedule(&host->finish_tasklet);
        } else {
                if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
                        sdhci_send_command(host, mrq->sbc);
                else
                        sdhci_send_command(host, mrq->cmd);
        }

        mmiowb();
        spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
        u8 ctrl;

        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
        if (width == MMC_BUS_WIDTH_8) {
                ctrl &= ~SDHCI_CTRL_4BITBUS;
                if (host->version >= SDHCI_SPEC_300)
                        ctrl |= SDHCI_CTRL_8BITBUS;
        } else {
                if (host->version >= SDHCI_SPEC_300)
                        ctrl &= ~SDHCI_CTRL_8BITBUS;
                if (width == MMC_BUS_WIDTH_4)
                        ctrl |= SDHCI_CTRL_4BITBUS;
                else
                        ctrl &= ~SDHCI_CTRL_4BITBUS;
        }
        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
        u16 ctrl_2;

        ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
        /* Select Bus Speed Mode for host */
        ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
        if ((timing == MMC_TIMING_MMC_HS200) ||
            (timing == MMC_TIMING_UHS_SDR104))
                ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
        else if (timing == MMC_TIMING_UHS_SDR12)
                ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
        else if (timing == MMC_TIMING_UHS_SDR25)
                ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
        else if (timing == MMC_TIMING_UHS_SDR50)
                ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
        else if ((timing == MMC_TIMING_UHS_DDR50) ||
                 (timing == MMC_TIMING_MMC_DDR52))
                ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
        else if (timing == MMC_TIMING_MMC_HS400)
                ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
        sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
        unsigned long flags;
        u8 ctrl;
        struct mmc_host *mmc = host->mmc;

        spin_lock_irqsave(&host->lock, flags);

        if (host->flags & SDHCI_DEVICE_DEAD) {
                spin_unlock_irqrestore(&host->lock, flags);
                if (!IS_ERR(mmc->supply.vmmc) &&
                    ios->power_mode == MMC_POWER_OFF)
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
                return;
        }

        /*
         * Reset the chip on each power off.
         * Should clear out any weird states.
         */
        if (ios->power_mode == MMC_POWER_OFF) {
                sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
                sdhci_reinit(host);
        }

        if (host->version >= SDHCI_SPEC_300 &&
            (ios->power_mode == MMC_POWER_UP) &&
            !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
                sdhci_enable_preset_value(host, false);

        if (!ios->clock || ios->clock != host->clock) {
                host->ops->set_clock(host, ios->clock);
                host->clock = ios->clock;

                if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
                    host->clock) {
                        host->timeout_clk = host->mmc->actual_clock ?
                                            host->mmc->actual_clock / 1000 :
                                            host->clock / 1000;
                        host->mmc->max_busy_timeout =
                                host->ops->get_max_timeout_count ?
                                host->ops->get_max_timeout_count(host) :
                                1 << 27;
                        host->mmc->max_busy_timeout /= host->timeout_clk;
                }
        }

        __sdhci_set_power(host, ios->power_mode, ios->vdd);

        if (host->ops->platform_send_init_74_clocks)
                host->ops->platform_send_init_74_clocks(host, ios->power_mode);

        host->ops->set_bus_width(host, ios->bus_width);

        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

        if ((ios->timing == MMC_TIMING_SD_HS ||
             ios->timing == MMC_TIMING_MMC_HS)
            && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
                ctrl |= SDHCI_CTRL_HISPD;
        else
                ctrl &= ~SDHCI_CTRL_HISPD;

        if (host->version >= SDHCI_SPEC_300) {
                u16 clk, ctrl_2;

                /* In case of UHS-I modes, set High Speed Enable */
                if ((ios->timing == MMC_TIMING_MMC_HS400) ||
                    (ios->timing == MMC_TIMING_MMC_HS200) ||
                    (ios->timing == MMC_TIMING_MMC_DDR52) ||
                    (ios->timing == MMC_TIMING_UHS_SDR50) ||
                    (ios->timing == MMC_TIMING_UHS_SDR104) ||
                    (ios->timing == MMC_TIMING_UHS_DDR50) ||
                    (ios->timing == MMC_TIMING_UHS_SDR25))
                        ctrl |= SDHCI_CTRL_HISPD;

                if (!host->preset_enabled) {
                        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
                        /*
                         * We only need to set Driver Strength if the
                         * preset value enable is not set.
                         */
                        ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
                        ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
                        if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
                                ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
                        else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
                                ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
                        else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
                                ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
                        else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
                                ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
                        else {
                                pr_warn("%s: invalid driver type, default to driver type B\n",
                                        mmc_hostname(mmc));
                                ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
                        }

                        sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
                } else {
                        /*
                         * According to SDHC Spec v3.00, if the Preset Value
                         * Enable in the Host Control 2 register is set, we
                         * need to reset SD Clock Enable before changing High
                         * Speed Enable to avoid generating clock glitches.
                         */

                        /* Reset SD Clock Enable */
                        clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
                        clk &= ~SDHCI_CLOCK_CARD_EN;
                        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

                        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

                        /* Re-enable SD Clock */
                        host->ops->set_clock(host, host->clock);
                }

                /* Reset SD Clock Enable */
                clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
                clk &= ~SDHCI_CLOCK_CARD_EN;
                sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

                host->ops->set_uhs_signaling(host, ios->timing);
                host->timing = ios->timing;

                if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
                    ((ios->timing == MMC_TIMING_UHS_SDR12) ||
                     (ios->timing == MMC_TIMING_UHS_SDR25) ||
                     (ios->timing == MMC_TIMING_UHS_SDR50) ||
                     (ios->timing == MMC_TIMING_UHS_SDR104) ||
                     (ios->timing == MMC_TIMING_UHS_DDR50) ||
                     (ios->timing == MMC_TIMING_MMC_DDR52))) {
                        u16 preset;

                        sdhci_enable_preset_value(host, true);
                        preset = sdhci_get_preset_value(host);
                        ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
                                        >> SDHCI_PRESET_DRV_SHIFT;
                }

                /* Re-enable SD Clock */
                host->ops->set_clock(host, host->clock);
        } else
                sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

        /*
         * Some (ENE) controllers go apeshit on some ios operation,
         * signalling timeout and CRC errors even on CMD0. Resetting
         * it on each ios seems to solve the problem.
         */
        if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
                sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

        mmiowb();
        spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct sdhci_host *host = mmc_priv(mmc);

        sdhci_runtime_pm_get(host);
        sdhci_do_set_ios(host, ios);
        sdhci_runtime_pm_put(host);
}

static int sdhci_do_get_cd(struct sdhci_host *host)
{
        int gpio_cd = mmc_gpio_get_cd(host->mmc);

        if (host->flags & SDHCI_DEVICE_DEAD)
                return 0;

        /* If nonremovable, assume that the card is always present. */
        if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
                return 1;

        /*
         * Try slot GPIO detect; if defined, it takes precedence
         * over built-in controller functionality.
         */
        if (!IS_ERR_VALUE(gpio_cd))
                return !!gpio_cd;

        /* If polling, assume that the card is always present. */
        if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
                return 1;

        /* Host native card detect */
        return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_get_cd(struct mmc_host *mmc)
{
        struct sdhci_host *host = mmc_priv(mmc);
        int ret;

        sdhci_runtime_pm_get(host);
        ret = sdhci_do_get_cd(host);
        sdhci_runtime_pm_put(host);
        return ret;
}

static int sdhci_check_ro(struct sdhci_host *host)
{
        unsigned long flags;
        int is_readonly;

        spin_lock_irqsave(&host->lock, flags);

        if (host->flags & SDHCI_DEVICE_DEAD)
                is_readonly = 0;
        else if (host->ops->get_ro)
                is_readonly = host->ops->get_ro(host);
        else
                is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
                                & SDHCI_WRITE_PROTECT);

        spin_unlock_irqrestore(&host->lock, flags);

        /* This quirk needs to be replaced by a callback-function later */
        return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
                !is_readonly : is_readonly;
}

#define SAMPLE_COUNT 5

static int sdhci_do_get_ro(struct sdhci_host *host)
{
        int i, ro_count;

        if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
                return sdhci_check_ro(host);

        ro_count = 0;
        for (i = 0; i < SAMPLE_COUNT; i++) {
                if (sdhci_check_ro(host)) {
                        if (++ro_count > SAMPLE_COUNT / 2)
                                return 1;
                }
                msleep(30);
        }
        return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
        struct sdhci_host *host = mmc_priv(mmc);

        if (host->ops && host->ops->hw_reset)
                host->ops->hw_reset(host);
}

static int sdhci_get_ro(struct mmc_host *mmc)
{
        struct sdhci_host *host = mmc_priv(mmc);
        int ret;

        sdhci_runtime_pm_get(host);
        ret = sdhci_do_get_ro(host);
        sdhci_runtime_pm_put(host);
        return ret;
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
        if (!(host->flags & SDHCI_DEVICE_DEAD)) {
                if (enable)
                        host->ier |= SDHCI_INT_CARD_INT;
                else
                        host->ier &= ~SDHCI_INT_CARD_INT;

                sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
                sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
                mmiowb();
        }
}

static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;

        sdhci_runtime_pm_get(host);

        spin_lock_irqsave(&host->lock, flags);
        if (enable)
                host->flags |= SDHCI_SDIO_IRQ_ENABLED;
        else
                host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

        sdhci_enable_sdio_irq_nolock(host, enable);
        spin_unlock_irqrestore(&host->lock, flags);

        sdhci_runtime_pm_put(host);
}

static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
                                                struct mmc_ios *ios)
{
        struct mmc_host *mmc = host->mmc;
        u16 ctrl;
        int ret;

        /*
         * Signal Voltage Switching is only applicable for Host Controllers
         * v3.00 and above.
         */
        if (host->version < SDHCI_SPEC_300)
                return 0;

        ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

        switch (ios->signal_voltage) {
        case MMC_SIGNAL_VOLTAGE_330:
                /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
                ctrl &= ~SDHCI_CTRL_VDD_180;
                sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

                if (!IS_ERR(mmc->supply.vqmmc)) {
                        ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
                                                    3600000);
                        if (ret) {
                                pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
                                        mmc_hostname(mmc));
                                return -EIO;
                        }
                }
                /* Wait for 5 ms */
                usleep_range(5000, 5500);

                /* 3.3V regulator output should be stable within 5 ms */
                ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
                if (!(ctrl & SDHCI_CTRL_VDD_180))
                        return 0;

                pr_warn("%s: 3.3V regulator output did not become stable\n",
                        mmc_hostname(mmc));

                return -EAGAIN;
        case MMC_SIGNAL_VOLTAGE_180:
                if (!IS_ERR(mmc->supply.vqmmc)) {
                        ret = regulator_set_voltage(mmc->supply.vqmmc,
                                                    1700000, 1950000);
                        if (ret) {
                                pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
                                        mmc_hostname(mmc));
                                return -EIO;
                        }
                }

                /*
                 * Enable 1.8V Signal Enable in the Host Control2
                 * register
                 */
                ctrl |= SDHCI_CTRL_VDD_180;
                sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

                /* Some controllers need to do more when switching */
                if (host->ops->voltage_switch)
                        host->ops->voltage_switch(host);

                /* 1.8V regulator output should be stable within 5 ms */
                ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
                if (ctrl & SDHCI_CTRL_VDD_180)
                        return 0;

                pr_warn("%s: 1.8V regulator output did not become stable\n",
                        mmc_hostname(mmc));

                return -EAGAIN;
        case MMC_SIGNAL_VOLTAGE_120:
                if (!IS_ERR(mmc->supply.vqmmc)) {
                        ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
                                                    1300000);
                        if (ret) {
                                pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
                                        mmc_hostname(mmc));
                                return -EIO;
                        }
                }
                return 0;
        default:
                /* No signal voltage switch required */
                return 0;
        }
}

static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
                                             struct mmc_ios *ios)
{
        struct sdhci_host *host = mmc_priv(mmc);
        int err;

        if (host->version < SDHCI_SPEC_300)
                return 0;
        sdhci_runtime_pm_get(host);
        err = sdhci_do_start_signal_voltage_switch(host, ios);
        sdhci_runtime_pm_put(host);
        return err;
}

static int sdhci_card_busy(struct mmc_host *mmc)
{
        struct sdhci_host *host = mmc_priv(mmc);
        u32 present_state;

        sdhci_runtime_pm_get(host);
        /* Check whether DAT[3:0] is 0000 */
        present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
        sdhci_runtime_pm_put(host);

        return !(present_state & SDHCI_DATA_LVL_MASK);
}

static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->flags |= SDHCI_HS400_TUNING;
        spin_unlock_irqrestore(&host->lock, flags);

        return 0;
}

static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
        struct sdhci_host *host = mmc_priv(mmc);
        u16 ctrl;
        int tuning_loop_counter = MAX_TUNING_LOOP;
        int err = 0;
        unsigned long flags;
        unsigned int tuning_count = 0;
        bool hs400_tuning;

        sdhci_runtime_pm_get(host);
        spin_lock_irqsave(&host->lock, flags);

        hs400_tuning = host->flags & SDHCI_HS400_TUNING;
        host->flags &= ~SDHCI_HS400_TUNING;

        if (host->tuning_mode == SDHCI_TUNING_MODE_1)
                tuning_count = host->tuning_count;

        /*
         * The Host Controller needs tuning in case of SDR104 and DDR50
         * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
         * the Capabilities register.
         * If the Host Controller supports the HS200 mode then the
         * tuning function has to be executed.
         */
        switch (host->timing) {
        /* HS400 tuning is done in HS200 mode */
        case MMC_TIMING_MMC_HS400:
                err = -EINVAL;
                goto out_unlock;

        case MMC_TIMING_MMC_HS200:
                /*
                 * Periodic re-tuning for HS400 is not expected to be needed, so
                 * disable it here.
                 */
                if (hs400_tuning)
                        tuning_count = 0;
                break;

        case MMC_TIMING_UHS_SDR104:
        case MMC_TIMING_UHS_DDR50:
                break;

        case MMC_TIMING_UHS_SDR50:
                if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
                    host->flags & SDHCI_SDR104_NEEDS_TUNING)
                        break;
                /* FALLTHROUGH */

        default:
                goto out_unlock;
        }

        if (host->ops->platform_execute_tuning) {
                spin_unlock_irqrestore(&host->lock, flags);
                err = host->ops->platform_execute_tuning(host, opcode);
                sdhci_runtime_pm_put(host);
                return err;
        }

        ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
        ctrl |= SDHCI_CTRL_EXEC_TUNING;
        if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
                ctrl |= SDHCI_CTRL_TUNED_CLK;
        sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

        /*
         * As per the Host Controller spec v3.00, tuning command
         * generates Buffer Read Ready interrupt, so enable that.
         *
         * Note: The spec clearly says that when tuning sequence
         * is being performed, the controller does not generate
         * interrupts other than Buffer Read Ready interrupt. But
         * to make sure we don't hit a controller bug, we _only_
         * enable Buffer Read Ready interrupt here.
         */
        sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
        sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

        /*
         * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
         * of loops reaches 40 times or a timeout of 150 ms occurs.
         */
        do {
                struct mmc_command cmd = {0};
                struct mmc_request mrq = {NULL};

                cmd.opcode = opcode;
                cmd.arg = 0;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
                cmd.retries = 0;
                cmd.data = NULL;
                cmd.error = 0;

                if (tuning_loop_counter-- == 0)
                        break;

                mrq.cmd = &cmd;
                host->mrq = &mrq;

                /*
                 * In response to CMD19, the card sends 64 bytes of tuning
                 * block to the Host Controller. So we set the block size
                 * to 64 here.
                 */
                if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
                        if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
                                sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
                                             SDHCI_BLOCK_SIZE);
                        else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
                                sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
                                             SDHCI_BLOCK_SIZE);
                } else {
                        sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
                                     SDHCI_BLOCK_SIZE);
                }

                /*
                 * The tuning block is sent by the card to the host controller.
                 * So we set the TRNS_READ bit in the Transfer Mode register.
                 * This also takes care of setting DMA Enable and Multi Block
                 * Select in the same register to 0.
                 */
                sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

                sdhci_send_command(host, &cmd);

                host->cmd = NULL;
                host->mrq = NULL;

                spin_unlock_irqrestore(&host->lock, flags);
                /* Wait for Buffer Read Ready interrupt */
                wait_event_interruptible_timeout(host->buf_ready_int,
                                                 (host->tuning_done == 1),
                                                 msecs_to_jiffies(50));
                spin_lock_irqsave(&host->lock, flags);

                if (!host->tuning_done) {
                        pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
                        ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
                        ctrl &= ~SDHCI_CTRL_TUNED_CLK;
                        ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
                        sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

                        err = -EIO;
                        goto out;
                }

                host->tuning_done = 0;

                ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

                /* eMMC spec does not require a delay between tuning cycles */
                if (opcode == MMC_SEND_TUNING_BLOCK)
                        mdelay(1);
        } while (ctrl & SDHCI_CTRL_EXEC_TUNING);

        /*
         * The Host Driver has exhausted the maximum number of loops allowed,
         * so use fixed sampling frequency.
         */
        if (tuning_loop_counter < 0) {
                ctrl &= ~SDHCI_CTRL_TUNED_CLK;
                sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
        }
        if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
                pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
                err = -EIO;
        }

out:
        if (tuning_count) {
                /*
                 * In case tuning fails, host controllers which support
                 * re-tuning can try tuning again at a later time, when the
                 * re-tuning timer expires. So for these controllers, we
                 * return 0. Since there might be other controllers who do not
                 * have this capability, we return error for them.
                 */
                err = 0;
        }

        host->mmc->retune_period = err ? 0 : tuning_count;

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
        spin_unlock_irqrestore(&host->lock, flags);
        sdhci_runtime_pm_put(host);

        return err;
}

static int sdhci_select_drive_strength(struct mmc_card *card,
                                       unsigned int max_dtr, int host_drv,
                                       int card_drv, int *drv_type)
{
        struct sdhci_host *host = mmc_priv(card->host);

        if (!host->ops->select_drive_strength)
                return 0;

        return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
                                                card_drv, drv_type);
}

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
        /* Host Controller v3.00 defines preset value registers */
        if (host->version < SDHCI_SPEC_300)
                return;

        /*
         * We only enable or disable Preset Value if they are not already
         * enabled or disabled respectively. Otherwise, we bail out.
         */
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}

static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			   int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     data->flags & MMC_DATA_WRITE ?
			     DMA_TO_DEVICE : DMA_FROM_DEVICE);

	data->host_cookie = COOKIE_UNMAPPED;
}

static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool is_first_req)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	if (host->flags & SDHCI_REQ_USE_DMA)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}

static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = sdhci_do_get_cd(host);

	spin_lock_irqsave(&host->lock, flags);

	/* Check host->mrq first in case we are runtime suspended */
	if (host->mrq && !present) {
		pr_err("%s: Card removed during transfer!\n",
		       mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
		       mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.select_drive_strength		= sdhci_select_drive_strength,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};

/*****************************************************************************\
 *                                                                           *
 *                                Tasklets                                   *
 *                                                                           *
\*****************************************************************************/
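/*
 * The finish tasklet runs in softirq context once a request has completed
 * (or failed): it unmaps any DMA buffers, resets the controller's command
 * and data state machines on error, and hands the request back to the core
 * via mmc_request_done().
 */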
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host *)param;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If this tasklet gets rescheduled while running, it will
	 * be run again afterwards but without any active request.
	 */
	if (!host->mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				     (data->flags & MMC_DATA_READ) ?
				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
	    ((mrq->cmd && mrq->cmd->error) ||
	     (mrq->sbc && mrq->sbc->error) ||
	     (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
			    (mrq->data->stop && mrq->data->stop->error))) ||
	     (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/*
		 * The spec says we should do both at the same time, but
		 * Ricoh controllers do not like that.
		 */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
	sdhci_runtime_pm_put(host);
}

static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

/*****************************************************************************\
 *                                                                           *
 *                           Interrupt handling                              *
 *                                                                           *
\*****************************************************************************/

static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
	BUG_ON(intmask == 0);

	if (!host->cmd) {
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;

		/*
		 * If this command initiates a data phase and a response
		 * CRC error is signalled, the card can start transferring
		 * data - the card may have received the command without
		 * error. We must not terminate the mmc_request early.
		 *
		 * If the card did not receive the command or returned an
		 * error which prevented it sending data, the data phase
		 * will time out.
		 */
		if (host->cmd->data &&
		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
		     SDHCI_INT_CRC) {
			host->cmd = NULL;
			return;
		}

		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 * feature so there might be some problems with older
	 * controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			 !host->busy_handle) {
			/* Mark that command completed before busy ended */
			host->busy_handle = 1;
			return;
		}

		/*
		 * The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE.
		 */
	} else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
		   host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
		*mask &= ~SDHCI_INT_DATA_END;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}

#ifdef CONFIG_MMC_DEBUG
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif
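/*
 * Data-path interrupt handling. Besides normal data errors and completion,
 * this covers three special cases visible in the code below: the Buffer
 * Read Ready interrupt raised by tuning commands, busy-end signalling for
 * commands without a data phase, and SDMA buffer-boundary interrupts that
 * require the transfer to be restarted at the next boundary (512 KiB by
 * default).
 */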
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	BUG_ON(intmask == 0);

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->cmd->error = -ETIMEDOUT;
				tasklet_schedule(&host->finish_tasklet);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				/*
				 * Some cards handle the busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->busy_handle)
					sdhci_finish_command(host);
				else
					host->busy_handle = 1;
				return;
			}
		}

		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;

			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				  ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				 SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
			    mmc_hostname(host->mmc), dmastart,
			    host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
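/*
 * Hard interrupt handler. It runs with host->lock held and loops re-reading
 * the interrupt status (bounded by max_loops) so that interrupts raised
 * while earlier ones are being serviced get picked up without returning.
 * Card-detect and SDIO card interrupts are only latched into
 * host->thread_isr here and handled later in sdhci_thread_irq().
 */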
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		DBG("*** %s got interrupt: 0x%08x\n",
		    mmc_hostname(host->mmc), intmask);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is an observation on i.MX eSDHC: the INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted. We have to mask
			 * the irq to prevent an interrupt storm which would
			 * freeze the system. The REMOVE bit suffers from
			 * the same problem.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
				      &intmask);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
			       mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_CARD_INT) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}

		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}
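/*
 * Threaded half of the interrupt handler. It picks up the bits latched in
 * host->thread_isr and does the work that must not run in hard-IRQ context:
 * card-detect handling (with a 200 ms debounce via mmc_detect_change()) and
 * running SDIO function interrupts.
 */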
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		sdhci_card_event(host->mmc);
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}

/*****************************************************************************\
 *                                                                           *
 *                             Suspend/resume                                *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
		| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask;
	/* Avoid fake wake-ups */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);

static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
		| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}

int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_suspend_host);

int sdhci_resume_host(struct sdhci_host *host)
{
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		sdhci_do_set_ios(host, &host->mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
		mmiowb();
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	sdhci_enable_card_detection(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_resume_host);

static int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return pm_runtime_get_sync(host->mmc->parent);
}

static int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	pm_runtime_mark_last_busy(host->mmc->parent);
	return pm_runtime_put_autosuspend(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}
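/*
 * Runtime suspend leaves only the SDIO card interrupt enabled so that a
 * wake-capable card can still signal. The synchronize_hardirq() below makes
 * sure no handler is still running on another CPU before the host is marked
 * runtime-suspended.
 */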
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);

int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	/* Force clock and power re-programming */
	host->pwr = 0;
	host->clock = 0;
	sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
	sdhci_do_set_ios(host, &host->mmc->ios);

	if ((host_flags & SDHCI_PV_ENABLED) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
		spin_lock_irqsave(&host->lock, flags);
		sdhci_enable_preset_value(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

#endif /* CONFIG_PM */

/*****************************************************************************\
 *                                                                           *
 *                      Device allocation/registration                       *
 *                                                                           *
\*****************************************************************************/

struct sdhci_host *sdhci_alloc_host(struct device *dev, size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	return host;
}
EXPORT_SYMBOL_GPL(sdhci_alloc_host);

static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int ret = -EINVAL;

	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* Try the 64-bit mask if the hardware is capable of it */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* 32-bit mask as default & fallback */
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return ret;
}
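/*
 * sdhci_add_host() is the main probe-time entry point: it reads the
 * capabilities registers, selects SDMA/ADMA/PIO operation, derives clock,
 * timeout and voltage parameters, and finally registers the mmc_host with
 * the core.
 */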
int sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 caps[2] = {0, 0};
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	if (debug_quirks)
		host->quirks = debug_quirks;
	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	override_timeout_clk = host->timeout_clk;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (host->version & SDHCI_SPEC_VER_MASK)
				>> SDHCI_SPEC_VER_SHIFT;
	if (host->version > SDHCI_SPEC_300) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
		sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->version >= SDHCI_SPEC_300)
		caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
			host->caps1 :
			sdhci_readl(host, SDHCI_CAPABILITIES_1);

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
	    (host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
	    (caps[0] & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
	    (host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA. A driver has the opportunity to change
	 * that during the first call to ->enable_dma(). Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (caps[0] & SDHCI_CAN_64BIT)
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;

	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
		 */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
					 host->adma_table_sz, &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
	    SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			return -ENODEV;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;
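	/*
	 * Worked example: with a 200 MHz base clock, a v3.00 divided clock
	 * can go as low as 200 MHz / 2046 (SDHCI_MAX_DIV_SPEC_300), roughly
	 * 97.8 kHz, while in programmable clock mode the minimum is
	 * (max_clk * clk_mul) / 1024, as computed below.
	 */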
	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			max_clk = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;

	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;
		if (host->timeout_clk == 0) {
			if (host->ops->get_timeout_clock) {
				host->timeout_clk =
					host->ops->get_timeout_clock(host);
			} else {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
				       mmc_hostname(mmc));
				return -ENODEV;
			}
		}

		if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;
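	/*
	 * Auto-CMD23 writes its block-count argument to the Argument 2
	 * register, which, per the SDHCI specification, shares its location
	 * with the SDMA system address register, so the two features cannot
	 * be used at the same time; hence the ADMA-or-PIO restriction below.
	 */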
	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA)) &&
	    !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
	} else {
		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out. Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (caps[0] & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
	    IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	/* If there are external regulators, get them */
	if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	/* If there is a vqmmc regulator and no 1.8V signalling, there's no UHS */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
				     SDHCI_SUPPORT_SDR50 |
				     SDHCI_SUPPORT_DDR50);
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
		caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			     SDHCI_SUPPORT_DDR50);

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		       SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (caps[1] & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/*
		 * SD3.0: SDR104 is supported, so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (caps[1] & SDHCI_SUPPORT_SDR50)
		mmc->caps |= MMC_CAP_UHS_SDR50;

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (caps[1] & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (caps[1] & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Does the host need tuning for SDR104 / HS200? */
	if (mmc->caps2 & MMC_CAP2_HS200)
		host->flags |= SDHCI_SDR104_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (caps[1] & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (caps[1] & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (caps[1] & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for the re-tuning timer count */
	host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case the Re-tuning Timer is not disabled, the actual value of
	 * the re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
			    SDHCI_RETUNING_MODE_SHIFT;
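	/*
	 * A Re-Tuning Timer Count of n therefore yields a 2^(n-1) second
	 * period; for example, n = 4 gives 8 seconds. This value later
	 * becomes mmc->retune_period once tuning succeeds (see
	 * sdhci_execute_tuning() above).
	 */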
	ocr_avail = 0;

	/*
	 * According to the SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, the Host Driver should set XPC to 1.
	 * Also the value is meaningful only if Voltage Support in the
	 * Capabilities register is set. The actual current value is 4 times
	 * the register value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);

		if (curr > 0) {
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}

	if (caps[0] & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
					 SDHCI_MAX_CURRENT_330_MASK) >>
					SDHCI_MAX_CURRENT_330_SHIFT) *
					SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
					 SDHCI_MAX_CURRENT_300_MASK) >>
					SDHCI_MAX_CURRENT_300_SHIFT) *
					SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
					 SDHCI_MAX_CURRENT_180_MASK) >>
					SDHCI_MAX_CURRENT_180_SHIFT) *
					SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest priority. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		return -ENODEV;
	}

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = SDHCI_MAX_SEGS;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;
	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		     sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto reset;
	}
#endif

	mmiowb();

	mmc_add_host(mmc);

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);

void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 *                            Driver init/exit                               *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");