/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
	defined(CONFIG_MMC_SDHCI_MODULE))
#define SDHCI_USE_LEDS_CLASS
#endif

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data,
				  struct sdhci_host_next *next);

#ifdef CONFIG_PM
static int sdhci_runtime_pm_get(struct sdhci_host *host);
static int sdhci_runtime_pm_put(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
#else
static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return 0;
}
static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	return 0;
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
}
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
}
#endif

static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
		mmc_hostname(host->mmc));

	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
		sdhci_readl(host, SDHCI_DMA_ADDRESS),
		sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		sdhci_readw(host, SDHCI_BLOCK_SIZE),
		sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		sdhci_readl(host, SDHCI_ARGUMENT),
		sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
		sdhci_readl(host, SDHCI_PRESENT_STATE),
		sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
		sdhci_readb(host, SDHCI_POWER_CONTROL),
		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		sdhci_readl(host, SDHCI_INT_STATUS));
	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		sdhci_readl(host, SDHCI_INT_ENABLE),
		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		sdhci_readw(host, SDHCI_ACMD12_ERR),
		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
		sdhci_readl(host, SDHCI_CAPABILITIES),
		sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
		sdhci_readw(host, SDHCI_COMMAND),
		sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
		sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
		else
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
	}

	pr_debug(DRIVER_NAME ": ===========================================\n");
}

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);

static void sdhci_init(struct sdhci_host *host, int soft)
{
	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		sdhci_set_ios(host->mmc, &host->mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	/*
	 * Retuning is affected by the particular card inserted and is only
	 * applicable to UHS-I cards. So reset these fields to their initial
	 * values when the card is removed.
	 */
	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
		host->flags &= ~SDHCI_USING_RETUNING_TIMER;

		del_timer_sync(&host->tuning_timer);
		host->flags &= ~SDHCI_NEEDS_RETUNING;
	}
	sdhci_enable_card_detection(host);
}

static void sdhci_activate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_deactivate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#ifdef SDHCI_USE_LEDS_CLASS
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		sdhci_deactivate_led(host);
	else
		sdhci_activate_led(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}
#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
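
/*
 * Editor's illustration of the word packing above, not part of the original
 * source: for a hypothetical 6-byte block, the write loop packs bytes
 * b0..b3 into 'scratch' as (b3 << 24) | (b2 << 16) | (b1 << 8) | b0 and
 * flushes it to SDHCI_BUFFER when chunk reaches 4; the trailing partial
 * word (b5 << 8) | b4 is flushed by the "(len == 0) && (blksize == 0)"
 * test even though chunk is only 2. The read loop undoes this
 * byte-for-byte by shifting 'scratch' right after each byte.
 */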

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
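
/*
 * Editor's sketch of the descriptor encoding above, assuming the usual
 * ADMA2 layout from sdhci.h: a 512-byte transfer segment at DMA address
 * 0x12340000 is written, little-endian, as cmd = ADMA2_TRAN_VALID,
 * len = 512, addr_lo = 0x12340000. With SDHCI_USE_64_BIT_DMA set,
 * addr_hi additionally carries bits [63:32] of the address, and the
 * final descriptor of a table gets ADMA2_END OR-ed into cmd.
 */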

static int sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	void *desc;
	void *align;
	dma_addr_t addr;
	dma_addr_t align_addr;
	int len, offset;

	struct scatterlist *sg;
	int i;
	char *buffer;
	unsigned long flags;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	host->align_addr = dma_map_single(mmc_dev(host->mmc),
		host->align_buffer, host->align_buffer_sz, direction);
	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
		goto fail;
	BUG_ON(host->align_addr & host->align_mask);

	host->sg_count = sdhci_pre_dma_transfer(host, data, NULL);
	if (host->sg_count < 0)
		goto unmap_align;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA
		 * addresses must be 32-bit aligned. If they
		 * aren't, then we use a bounce buffer for
		 * the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (host->align_sz - (addr & host->align_mask)) &
			 host->align_mask;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += host->align_sz;
			align_addr += host->align_sz;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* tran, valid */
		sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
		desc += host->desc_sz;

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/*
		 * Mark the last descriptor as the terminating descriptor
		 */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/*
		 * Add a terminating entry.
		 */

		/* nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}

	/*
	 * Resync align buffer as we might have changed it.
	 */
	if (data->flags & MMC_DATA_WRITE) {
		dma_sync_single_for_device(mmc_dev(host->mmc),
			host->align_addr, host->align_buffer_sz, direction);
	}

	return 0;

unmap_align:
	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		host->align_buffer_sz, direction);
fail:
	return -EINVAL;
}

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;
	bool has_unaligned;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		host->align_buffer_sz, direction);

	/* Do a quick scan of the SG list for any unaligned mappings */
	has_unaligned = false;
	for_each_sg(data->sg, sg, host->sg_count, i)
		if (sg_dma_address(sg) & host->align_mask) {
			has_unaligned = true;
			break;
		}

	if (has_unaligned && data->flags & MMC_DATA_READ) {
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & host->align_mask) {
				size = host->align_sz -
				       (sg_dma_address(sg) & host->align_mask);

				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				align += host->align_sz;
			}
		}
	}

	if (!data->host_cookie)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = data->timeout_ns / 1000;
		if (host->clock)
			target_timeout += data->timeout_clks / host->clock;
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}
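
/*
 * Editor's worked example for sdhci_calc_timeout(), not from the original
 * source: with timeout_clk = 50000 kHz and a 500 ms target
 * (target_timeout = 500000 us), the base timeout is
 * (1 << 13) * 1000 / 50000 = 163 us; doubling per step first reaches
 * 500000 us at count = 12, which selects a TMCLK * 2^(13 + 12) hardware
 * timeout.
 */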

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;
	int ret;

	WARN_ON(host->data);

	if (data || (cmd->flags & MMC_RSP_BUSY))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
		host->flags |= SDHCI_REQ_USE_DMA;

	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * As we use 3 byte chunks to work around
			 * alignment problems, we need to check this
			 * quirk.
			 */
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				sdhci_writel(host, host->adma_addr,
					SDHCI_ADMA_ADDRESS);
				if (host->flags & SDHCI_USE_64_BIT_DMA)
					sdhci_writel(host,
						(u64)host->adma_addr >> 32,
						SDHCI_ADMA_ADDRESS_HI);
			}
		} else {
			int sg_cnt;

			sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
			if (sg_cnt == 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				WARN_ON(sg_cnt != 1);
				sdhci_writel(host, sg_dma_address(data->sg),
					SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
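
/*
 * Editor's example of the block-size register encoding above, assuming
 * the usual SDHCI_MAKE_BLKSZ() definition from sdhci.h: with the default
 * boundary argument 7 (512K SDMA boundary) and a 512-byte block,
 * SDHCI_MAKE_BLKSZ(7, 512) = (7 << 12) | 512 = 0x7200, i.e. bits [14:12]
 * select the boundary and bits [11:0] the block size.
 */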

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
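
/*
 * Editor's illustration, assuming the standard transfer-mode bit values
 * from sdhci.h: a multi-block DMA read using Auto-CMD12 ends up writing
 * SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI | SDHCI_TRNS_AUTO_CMD12 |
 * SDHCI_TRNS_READ | SDHCI_TRNS_DMA, i.e. 0x0037, to SDHCI_TRANSFER_MODE.
 */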

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA)
			sdhci_adma_table_post(host, data);
		else {
			if (!data->host_cookie)
				dma_unmap_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}
	}

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	mod_timer(&host->timer, timeout);

	host->cmd = cmd;
	host->busy_handle = 0;

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0; i < 4; i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	/* Finished CMD23, now send actual command. */
	if (host->cmd == host->mrq->sbc) {
		host->cmd = NULL;
		sdhci_send_command(host, host->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!host->cmd->data)
			tasklet_schedule(&host->finish_tasklet);

		host->cmd = NULL;
	}
}
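
/*
 * Editor's example of the response reassembly above: the controller
 * strips the CRC from 136-bit (R2) responses, so with SDHCI_RESPONSE at
 * register offset 0x10, resp[0] becomes (readl(0x1C) << 8) | readb(0x1B)
 * and so on down to resp[3] = readl(0x10) << 8, whose low byte is left
 * as zero.
 */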

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	unsigned long timeout;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			real_div = div;
			clk_mul = host->clk_mul;
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
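
/*
 * Editor's worked example for sdhci_set_clock(), not from the original
 * source: on a v3.00 host with max_clk = 200 MHz and no clk_mul, a
 * 50 MHz request scans even divisors and stops at div = 4 (real_div),
 * giving actual_clock = 50 MHz; div >> 1 = 2 is then written into the
 * 10-bit divider field of SDHCI_CLOCK_CONTROL.
 */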

static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
			    unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	u8 pwr = 0;

	if (!IS_ERR(mmc->supply.vmmc)) {
		spin_unlock_irq(&host->lock);
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		spin_lock_irq(&host->lock);

		if (mode != MMC_POWER_OFF)
			sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
		else
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		return;
	}

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			BUG();
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
		vdd = 0;
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10ms delay before they
		 * can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;
	u32 tuning_opcode;

	host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);

	present = mmc_gpio_get_cd(host->mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq = mrq;

	/*
	 * Firstly check card presence from cd-gpio.  The return could
	 * be one of the following possibilities:
	 *     negative: cd-gpio is not available
	 *     zero: cd-gpio is used, and card is removed
	 *     one: cd-gpio is used, and card is present
	 */
	if (present < 0) {
		/* If polling, assume that the card is always present. */
		if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
			present = 1;
		else
			present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
					SDHCI_CARD_PRESENT;
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else {
		u32 present_state;

		present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
		/*
		 * Check if the re-tuning timer has already expired and there
		 * is no on-going data transfer and DAT0 is not busy. If so,
		 * we need to execute tuning procedure before sending command.
		 */
		if ((host->flags & SDHCI_NEEDS_RETUNING) &&
		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) &&
		    (present_state & SDHCI_DATA_0_LVL_MASK)) {
			if (mmc->card) {
				/* eMMC uses cmd21 but sd and sdio use cmd19 */
				tuning_opcode =
					mmc->card->type == MMC_TYPE_MMC ?
					MMC_SEND_TUNING_BLOCK_HS200 :
					MMC_SEND_TUNING_BLOCK;

				/* Here we need to set the host->mrq to NULL,
				 * in case the pending finish_tasklet
				 * finishes it incorrectly.
				 */
				host->mrq = NULL;

				spin_unlock_irqrestore(&host->lock, flags);
				sdhci_execute_tuning(mmc, tuning_opcode);
				spin_lock_irqsave(&host->lock, flags);

				/* Restore original mmc_request structure */
				host->mrq = mrq;
			}
		}

		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
	unsigned long flags;
	u8 ctrl;
	struct mmc_host *mmc = host->mmc;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if ((ios->timing == MMC_TIMING_MMC_HS400) ||
		    (ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_MMC_DDR52) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
			ctrl |= SDHCI_CTRL_HISPD;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);
	sdhci_do_set_ios(host, ios);
	sdhci_runtime_pm_put(host);
}

static int sdhci_do_get_cd(struct sdhci_host *host)
{
	int gpio_cd = mmc_gpio_get_cd(host->mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If polling/nonremovable, assume that the card is always present. */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return 1;

	/* Try slot gpio detect */
	if (!IS_ERR_VALUE(gpio_cd))
		return !!gpio_cd;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_cd(host);
	sdhci_runtime_pm_put(host);
	return ret;
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

#define SAMPLE_COUNT	5

static int sdhci_do_get_ro(struct sdhci_host *host)
{
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_ro(host);
	sdhci_runtime_pm_put(host);
	return ret;
}
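
/*
 * Editor's note on the SDHCI_QUIRK_UNSTABLE_RO_DETECT sampling above:
 * with SAMPLE_COUNT = 5 the loop returns 1 as soon as a majority is
 * seen - e.g. readings 1, 0, 1, 1 return on the fourth sample
 * (ro_count = 3 > 5 / 2) without taking the fifth; if no majority of
 * ones is seen across the five samples the card is reported writable.
 */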

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}

static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	sdhci_runtime_pm_get(host);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	sdhci_runtime_pm_put(host);
}

static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
						struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
						    3600000);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc,
					1700000, 1950000);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
						    1300000);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}

static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
	struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err;

	if (host->version < SDHCI_SPEC_300)
		return 0;
	sdhci_runtime_pm_get(host);
	err = sdhci_do_start_signal_voltage_switch(host, ios);
	sdhci_runtime_pm_put(host);
	return err;
}

static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	sdhci_runtime_pm_get(host);
	/* Check whether DAT[3:0] is 0000 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
	sdhci_runtime_pm_put(host);

	return !(present_state & SDHCI_DATA_LVL_MASK);
}

static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int tuning_loop_counter = MAX_TUNING_LOOP;
	int err = 0;
	unsigned long flags;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	sdhci_runtime_pm_get(host);
	spin_lock_irqsave(&host->lock, flags);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	host->flags &= ~SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning only in case of SDR104 mode
	 * and for SDR50 mode when Use Tuning for SDR50 is set in the
	 * Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out_unlock;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
		    host->flags & SDHCI_SDR104_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out_unlock;
	}

	if (host->ops->platform_execute_tuning) {
		spin_unlock_irqrestore(&host->lock, flags);
		err = host->ops->platform_execute_tuning(host, opcode);
		sdhci_runtime_pm_put(host);
		return err;
	}

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

	/*
	 * Issue CMD19 repeatedly until Execute Tuning is set to 0 or the
	 * number of loops reaches 40 times or a timeout of 150ms occurs.
	 */
	do {
		struct mmc_command cmd = {0};
		struct mmc_request mrq = {NULL};

		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		cmd.retries = 0;
		cmd.data = NULL;
		cmd.error = 0;

		if (tuning_loop_counter-- == 0)
			break;

		mrq.cmd = &cmd;
		host->mrq = &mrq;

		/*
		 * In response to CMD19, the card sends 64 bytes of tuning
		 * block to the Host Controller. So we set the block size
		 * to 64 here.
		 */
		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
					     SDHCI_BLOCK_SIZE);
			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
					     SDHCI_BLOCK_SIZE);
		} else {
			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
				     SDHCI_BLOCK_SIZE);
		}

		/*
		 * The tuning block is sent by the card to the host controller.
		 * So we set the TRNS_READ bit in the Transfer Mode register.
		 * This also takes care of setting DMA Enable and Multi Block
		 * Select in the same register to 0.
		 */
		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

		sdhci_send_command(host, &cmd);

		host->cmd = NULL;
		host->mrq = NULL;

		spin_unlock_irqrestore(&host->lock, flags);
		/* Wait for Buffer Read Ready interrupt */
		wait_event_interruptible_timeout(host->buf_ready_int,
					(host->tuning_done == 1),
					msecs_to_jiffies(50));
		spin_lock_irqsave(&host->lock, flags);

		if (!host->tuning_done) {
			pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

			err = -EIO;
			goto out;
		}

		host->tuning_done = 0;

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			mdelay(1);
	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);

	/*
	 * The Host Driver has exhausted the maximum number of loops allowed,
	 * so use fixed sampling frequency.
	 */
	if (tuning_loop_counter < 0) {
		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
	}
	if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
		pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
		err = -EIO;
	}

out:
	host->flags &= ~SDHCI_NEEDS_RETUNING;

	if (tuning_count) {
		host->flags |= SDHCI_USING_RETUNING_TIMER;
		mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
	}

	/*
	 * In case tuning fails, host controllers which support re-tuning can
	 * try tuning again at a later time, when the re-tuning timer expires.
	 * So for these controllers, we return 0. Since there might be other
	 * controllers who do not have this capability, we return error for
	 * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
	 * a retuning timer to do the retuning for the card.
	 */
	if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
		err = 0;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
	spin_unlock_irqrestore(&host->lock, flags);
	sdhci_runtime_pm_put(host);

	return err;
}

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}
2117 */ 2118 if (host->preset_enabled != enable) { 2119 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2120 2121 if (enable) 2122 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2123 else 2124 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2125 2126 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2127 2128 if (enable) 2129 host->flags |= SDHCI_PV_ENABLED; 2130 else 2131 host->flags &= ~SDHCI_PV_ENABLED; 2132 2133 host->preset_enabled = enable; 2134 } 2135 } 2136 2137 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 2138 int err) 2139 { 2140 struct sdhci_host *host = mmc_priv(mmc); 2141 struct mmc_data *data = mrq->data; 2142 2143 if (host->flags & SDHCI_REQ_USE_DMA) { 2144 if (data->host_cookie) 2145 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 2146 data->flags & MMC_DATA_WRITE ? 2147 DMA_TO_DEVICE : DMA_FROM_DEVICE); 2148 mrq->data->host_cookie = 0; 2149 } 2150 } 2151 2152 static int sdhci_pre_dma_transfer(struct sdhci_host *host, 2153 struct mmc_data *data, 2154 struct sdhci_host_next *next) 2155 { 2156 int sg_count; 2157 2158 if (!next && data->host_cookie && 2159 data->host_cookie != host->next_data.cookie) { 2160 pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n", 2161 __func__, data->host_cookie, host->next_data.cookie); 2162 data->host_cookie = 0; 2163 } 2164 2165 /* Check if next job is already prepared */ 2166 if (next || 2167 (!next && data->host_cookie != host->next_data.cookie)) { 2168 sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, 2169 data->sg_len, 2170 data->flags & MMC_DATA_WRITE ? 2171 DMA_TO_DEVICE : DMA_FROM_DEVICE); 2172 2173 } else { 2174 sg_count = host->next_data.sg_count; 2175 host->next_data.sg_count = 0; 2176 } 2177 2178 2179 if (sg_count == 0) 2180 return -EINVAL; 2181 2182 if (next) { 2183 next->sg_count = sg_count; 2184 data->host_cookie = ++next->cookie < 0 ? 
1 : next->cookie; 2185 } else 2186 host->sg_count = sg_count; 2187 2188 return sg_count; 2189 } 2190 2191 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, 2192 bool is_first_req) 2193 { 2194 struct sdhci_host *host = mmc_priv(mmc); 2195 2196 if (mrq->data->host_cookie) { 2197 mrq->data->host_cookie = 0; 2198 return; 2199 } 2200 2201 if (host->flags & SDHCI_REQ_USE_DMA) 2202 if (sdhci_pre_dma_transfer(host, 2203 mrq->data, 2204 &host->next_data) < 0) 2205 mrq->data->host_cookie = 0; 2206 } 2207 2208 static void sdhci_card_event(struct mmc_host *mmc) 2209 { 2210 struct sdhci_host *host = mmc_priv(mmc); 2211 unsigned long flags; 2212 int present; 2213 2214 /* First check if client has provided their own card event */ 2215 if (host->ops->card_event) 2216 host->ops->card_event(host); 2217 2218 present = sdhci_do_get_cd(host); 2219 2220 spin_lock_irqsave(&host->lock, flags); 2221 2222 /* Check host->mrq first in case we are runtime suspended */ 2223 if (host->mrq && !present) { 2224 pr_err("%s: Card removed during transfer!\n", 2225 mmc_hostname(host->mmc)); 2226 pr_err("%s: Resetting controller.\n", 2227 mmc_hostname(host->mmc)); 2228 2229 sdhci_do_reset(host, SDHCI_RESET_CMD); 2230 sdhci_do_reset(host, SDHCI_RESET_DATA); 2231 2232 host->mrq->cmd->error = -ENOMEDIUM; 2233 tasklet_schedule(&host->finish_tasklet); 2234 } 2235 2236 spin_unlock_irqrestore(&host->lock, flags); 2237 } 2238 2239 static const struct mmc_host_ops sdhci_ops = { 2240 .request = sdhci_request, 2241 .post_req = sdhci_post_req, 2242 .pre_req = sdhci_pre_req, 2243 .set_ios = sdhci_set_ios, 2244 .get_cd = sdhci_get_cd, 2245 .get_ro = sdhci_get_ro, 2246 .hw_reset = sdhci_hw_reset, 2247 .enable_sdio_irq = sdhci_enable_sdio_irq, 2248 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 2249 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 2250 .execute_tuning = sdhci_execute_tuning, 2251 .card_event = sdhci_card_event, 2252 .card_busy = sdhci_card_busy, 2253 }; 2254 2255 /*****************************************************************************\ 2256 * * 2257 * Tasklets * 2258 * * 2259 \*****************************************************************************/ 2260 2261 static void sdhci_tasklet_finish(unsigned long param) 2262 { 2263 struct sdhci_host *host; 2264 unsigned long flags; 2265 struct mmc_request *mrq; 2266 2267 host = (struct sdhci_host*)param; 2268 2269 spin_lock_irqsave(&host->lock, flags); 2270 2271 /* 2272 * If this tasklet gets rescheduled while running, it will 2273 * be run again afterwards but without any active request. 2274 */ 2275 if (!host->mrq) { 2276 spin_unlock_irqrestore(&host->lock, flags); 2277 return; 2278 } 2279 2280 del_timer(&host->timer); 2281 2282 mrq = host->mrq; 2283 2284 /* 2285 * The controller needs a reset of internal state machines 2286 * upon error conditions. 2287 */ 2288 if (!(host->flags & SDHCI_DEVICE_DEAD) && 2289 ((mrq->cmd && mrq->cmd->error) || 2290 (mrq->sbc && mrq->sbc->error) || 2291 (mrq->data && ((mrq->data->error && !mrq->data->stop) || 2292 (mrq->data->stop && mrq->data->stop->error))) || 2293 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { 2294 2295 /* Some controllers need this kick or reset won't work here */ 2296 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 2297 /* This is to force an update */ 2298 host->ops->set_clock(host, host->clock); 2299 2300 /* Spec says we should do both at the same time, but Ricoh 2301 controllers do not like that. 
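	   (Hedged illustrative note: the single combined write the spec
	   suggests would be sdhci_do_reset(host, SDHCI_RESET_CMD |
	   SDHCI_RESET_DATA); the two separate resets below keep such
	   controllers happy.)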
*/ 2302 sdhci_do_reset(host, SDHCI_RESET_CMD); 2303 sdhci_do_reset(host, SDHCI_RESET_DATA); 2304 } 2305 2306 host->mrq = NULL; 2307 host->cmd = NULL; 2308 host->data = NULL; 2309 2310 #ifndef SDHCI_USE_LEDS_CLASS 2311 sdhci_deactivate_led(host); 2312 #endif 2313 2314 mmiowb(); 2315 spin_unlock_irqrestore(&host->lock, flags); 2316 2317 mmc_request_done(host->mmc, mrq); 2318 sdhci_runtime_pm_put(host); 2319 } 2320 2321 static void sdhci_timeout_timer(unsigned long data) 2322 { 2323 struct sdhci_host *host; 2324 unsigned long flags; 2325 2326 host = (struct sdhci_host*)data; 2327 2328 spin_lock_irqsave(&host->lock, flags); 2329 2330 if (host->mrq) { 2331 pr_err("%s: Timeout waiting for hardware " 2332 "interrupt.\n", mmc_hostname(host->mmc)); 2333 sdhci_dumpregs(host); 2334 2335 if (host->data) { 2336 host->data->error = -ETIMEDOUT; 2337 sdhci_finish_data(host); 2338 } else { 2339 if (host->cmd) 2340 host->cmd->error = -ETIMEDOUT; 2341 else 2342 host->mrq->cmd->error = -ETIMEDOUT; 2343 2344 tasklet_schedule(&host->finish_tasklet); 2345 } 2346 } 2347 2348 mmiowb(); 2349 spin_unlock_irqrestore(&host->lock, flags); 2350 } 2351 2352 static void sdhci_tuning_timer(unsigned long data) 2353 { 2354 struct sdhci_host *host; 2355 unsigned long flags; 2356 2357 host = (struct sdhci_host *)data; 2358 2359 spin_lock_irqsave(&host->lock, flags); 2360 2361 host->flags |= SDHCI_NEEDS_RETUNING; 2362 2363 spin_unlock_irqrestore(&host->lock, flags); 2364 } 2365 2366 /*****************************************************************************\ 2367 * * 2368 * Interrupt handling * 2369 * * 2370 \*****************************************************************************/ 2371 2372 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) 2373 { 2374 BUG_ON(intmask == 0); 2375 2376 if (!host->cmd) { 2377 pr_err("%s: Got command interrupt 0x%08x even " 2378 "though no command operation was in progress.\n", 2379 mmc_hostname(host->mmc), (unsigned)intmask); 2380 sdhci_dumpregs(host); 2381 return; 2382 } 2383 2384 if (intmask & SDHCI_INT_TIMEOUT) 2385 host->cmd->error = -ETIMEDOUT; 2386 else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT | 2387 SDHCI_INT_INDEX)) 2388 host->cmd->error = -EILSEQ; 2389 2390 if (host->cmd->error) { 2391 tasklet_schedule(&host->finish_tasklet); 2392 return; 2393 } 2394 2395 /* 2396 * The host can send an interrupt when the busy state has 2397 * ended, allowing us to wait without wasting CPU cycles. 2398 * Unfortunately this is overloaded on the "data complete" 2399 * interrupt, so we need to take some care when handling 2400 * it. 2401 * 2402 * Note: The 1.0 specification is a bit ambiguous about this 2403 * feature so there might be some problems with older 2404 * controllers. 
2405 */ 2406 if (host->cmd->flags & MMC_RSP_BUSY) { 2407 if (host->cmd->data) 2408 DBG("Cannot wait for busy signal when also " 2409 "doing a data transfer"); 2410 else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) 2411 && !host->busy_handle) { 2412 /* Mark that command complete before busy is ended */ 2413 host->busy_handle = 1; 2414 return; 2415 } 2416 2417 /* The controller does not support the end-of-busy IRQ, 2418 * fall through and take the SDHCI_INT_RESPONSE */ 2419 } else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && 2420 host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) { 2421 *mask &= ~SDHCI_INT_DATA_END; 2422 } 2423 2424 if (intmask & SDHCI_INT_RESPONSE) 2425 sdhci_finish_command(host); 2426 } 2427 2428 #ifdef CONFIG_MMC_DEBUG 2429 static void sdhci_adma_show_error(struct sdhci_host *host) 2430 { 2431 const char *name = mmc_hostname(host->mmc); 2432 void *desc = host->adma_table; 2433 2434 sdhci_dumpregs(host); 2435 2436 while (true) { 2437 struct sdhci_adma2_64_desc *dma_desc = desc; 2438 2439 if (host->flags & SDHCI_USE_64_BIT_DMA) 2440 DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 2441 name, desc, le32_to_cpu(dma_desc->addr_hi), 2442 le32_to_cpu(dma_desc->addr_lo), 2443 le16_to_cpu(dma_desc->len), 2444 le16_to_cpu(dma_desc->cmd)); 2445 else 2446 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 2447 name, desc, le32_to_cpu(dma_desc->addr_lo), 2448 le16_to_cpu(dma_desc->len), 2449 le16_to_cpu(dma_desc->cmd)); 2450 2451 desc += host->desc_sz; 2452 2453 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 2454 break; 2455 } 2456 } 2457 #else 2458 static void sdhci_adma_show_error(struct sdhci_host *host) { } 2459 #endif 2460 2461 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 2462 { 2463 u32 command; 2464 BUG_ON(intmask == 0); 2465 2466 /* CMD19 generates _only_ Buffer Read Ready interrupt */ 2467 if (intmask & SDHCI_INT_DATA_AVAIL) { 2468 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); 2469 if (command == MMC_SEND_TUNING_BLOCK || 2470 command == MMC_SEND_TUNING_BLOCK_HS200) { 2471 host->tuning_done = 1; 2472 wake_up(&host->buf_ready_int); 2473 return; 2474 } 2475 } 2476 2477 if (!host->data) { 2478 /* 2479 * The "data complete" interrupt is also used to 2480 * indicate that a busy state has ended. See comment 2481 * above in sdhci_cmd_irq(). 2482 */ 2483 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) { 2484 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 2485 host->cmd->error = -ETIMEDOUT; 2486 tasklet_schedule(&host->finish_tasklet); 2487 return; 2488 } 2489 if (intmask & SDHCI_INT_DATA_END) { 2490 /* 2491 * Some cards handle busy-end interrupt 2492 * before the command completed, so make 2493 * sure we do things in the proper order. 
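 *
 * A hedged sketch of the two orderings resolved via host->busy_handle:
 *
 *   response IRQ first:  sdhci_cmd_irq() sets host->busy_handle and
 *                        returns; the DATA_END branch below then calls
 *                        sdhci_finish_command().
 *   busy-end IRQ first:  host->busy_handle is set below; the later
 *                        response IRQ no longer waits and finishes the
 *                        command normally.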
2494 */ 2495 if (host->busy_handle) 2496 sdhci_finish_command(host); 2497 else 2498 host->busy_handle = 1; 2499 return; 2500 } 2501 } 2502 2503 pr_err("%s: Got data interrupt 0x%08x even " 2504 "though no data operation was in progress.\n", 2505 mmc_hostname(host->mmc), (unsigned)intmask); 2506 sdhci_dumpregs(host); 2507 2508 return; 2509 } 2510 2511 if (intmask & SDHCI_INT_DATA_TIMEOUT) 2512 host->data->error = -ETIMEDOUT; 2513 else if (intmask & SDHCI_INT_DATA_END_BIT) 2514 host->data->error = -EILSEQ; 2515 else if ((intmask & SDHCI_INT_DATA_CRC) && 2516 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 2517 != MMC_BUS_TEST_R) 2518 host->data->error = -EILSEQ; 2519 else if (intmask & SDHCI_INT_ADMA_ERROR) { 2520 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); 2521 sdhci_adma_show_error(host); 2522 host->data->error = -EIO; 2523 if (host->ops->adma_workaround) 2524 host->ops->adma_workaround(host, intmask); 2525 } 2526 2527 if (host->data->error) 2528 sdhci_finish_data(host); 2529 else { 2530 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 2531 sdhci_transfer_pio(host); 2532 2533 /* 2534 * We currently don't do anything fancy with DMA 2535 * boundaries, but as we can't disable the feature 2536 * we need to at least restart the transfer. 2537 * 2538 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 2539 * should return a valid address to continue from, but as 2540 * some controllers are faulty, don't trust them. 2541 */ 2542 if (intmask & SDHCI_INT_DMA_END) { 2543 u32 dmastart, dmanow; 2544 dmastart = sg_dma_address(host->data->sg); 2545 dmanow = dmastart + host->data->bytes_xfered; 2546 /* 2547 * Force update to the next DMA block boundary. 2548 */ 2549 dmanow = (dmanow & 2550 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 2551 SDHCI_DEFAULT_BOUNDARY_SIZE; 2552 host->data->bytes_xfered = dmanow - dmastart; 2553 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes," 2554 " next 0x%08x\n", 2555 mmc_hostname(host->mmc), dmastart, 2556 host->data->bytes_xfered, dmanow); 2557 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); 2558 } 2559 2560 if (intmask & SDHCI_INT_DATA_END) { 2561 if (host->cmd) { 2562 /* 2563 * Data managed to finish before the 2564 * command completed. Make sure we do 2565 * things in the proper order. 2566 */ 2567 host->data_early = 1; 2568 } else { 2569 sdhci_finish_data(host); 2570 } 2571 } 2572 } 2573 } 2574 2575 static irqreturn_t sdhci_irq(int irq, void *dev_id) 2576 { 2577 irqreturn_t result = IRQ_NONE; 2578 struct sdhci_host *host = dev_id; 2579 u32 intmask, mask, unexpected = 0; 2580 int max_loops = 16; 2581 2582 spin_lock(&host->lock); 2583 2584 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) { 2585 spin_unlock(&host->lock); 2586 return IRQ_NONE; 2587 } 2588 2589 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 2590 if (!intmask || intmask == 0xffffffff) { 2591 result = IRQ_NONE; 2592 goto out; 2593 } 2594 2595 do { 2596 /* Clear selected interrupts. */ 2597 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 2598 SDHCI_INT_BUS_POWER); 2599 sdhci_writel(host, mask, SDHCI_INT_STATUS); 2600 2601 DBG("*** %s got interrupt: 0x%08x\n", 2602 mmc_hostname(host->mmc), intmask); 2603 2604 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 2605 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 2606 SDHCI_CARD_PRESENT; 2607 2608 /* 2609 * There is an observation on i.mx esdhc. INSERT 2610 * bit will be immediately set again when it gets 2611 * cleared, if a card is inserted. We have to mask 2612 * the irq to prevent an interrupt storm which will 2613 * freeze the system. The REMOVE interrupt suffers 2614 * from the same problem. 2615 * 2616 * More testing is needed here to ensure it works 2617 * for other platforms though. 2618 */ 2619 host->ier &= ~(SDHCI_INT_CARD_INSERT | 2620 SDHCI_INT_CARD_REMOVE); 2621 host->ier |= present ? SDHCI_INT_CARD_REMOVE : 2622 SDHCI_INT_CARD_INSERT; 2623 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2624 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2625 2626 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 2627 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 2628 2629 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 2630 SDHCI_INT_CARD_REMOVE); 2631 result = IRQ_WAKE_THREAD; 2632 } 2633 2634 if (intmask & SDHCI_INT_CMD_MASK) 2635 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, 2636 &intmask); 2637 2638 if (intmask & SDHCI_INT_DATA_MASK) 2639 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 2640 2641 if (intmask & SDHCI_INT_BUS_POWER) 2642 pr_err("%s: Card is consuming too much power!\n", 2643 mmc_hostname(host->mmc)); 2644 2645 if (intmask & SDHCI_INT_CARD_INT) { 2646 sdhci_enable_sdio_irq_nolock(host, false); 2647 host->thread_isr |= SDHCI_INT_CARD_INT; 2648 result = IRQ_WAKE_THREAD; 2649 } 2650 2651 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 2652 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 2653 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 2654 SDHCI_INT_CARD_INT); 2655 2656 if (intmask) { 2657 unexpected |= intmask; 2658 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 2659 } 2660 2661 if (result == IRQ_NONE) 2662 result = IRQ_HANDLED; 2663 2664 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 2665 } while (intmask && --max_loops); 2666 out: 2667 spin_unlock(&host->lock); 2668 2669 if (unexpected) { 2670 pr_err("%s: Unexpected interrupt 0x%08x.\n", 2671 mmc_hostname(host->mmc), unexpected); 2672 sdhci_dumpregs(host); 2673 } 2674 2675 return result; 2676 } 2677 2678 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 2679 { 2680 struct sdhci_host *host = dev_id; 2681 unsigned long flags; 2682 u32 isr; 2683 2684 spin_lock_irqsave(&host->lock, flags); 2685 isr = host->thread_isr; 2686 host->thread_isr = 0; 2687 spin_unlock_irqrestore(&host->lock, flags); 2688 2689 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 2690 sdhci_card_event(host->mmc); 2691 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 2692 } 2693 2694 if (isr & SDHCI_INT_CARD_INT) { 2695 sdio_run_irqs(host->mmc); 2696 2697 spin_lock_irqsave(&host->lock, flags); 2698 if (host->flags & SDHCI_SDIO_IRQ_ENABLED) 2699 sdhci_enable_sdio_irq_nolock(host, true); 2700 spin_unlock_irqrestore(&host->lock, flags); 2701 } 2702 2703 return isr ? 
IRQ_HANDLED : IRQ_NONE; 2704 } 2705 2706 /*****************************************************************************\ 2707 * * 2708 * Suspend/resume * 2709 * * 2710 \*****************************************************************************/ 2711 2712 #ifdef CONFIG_PM 2713 void sdhci_enable_irq_wakeups(struct sdhci_host *host) 2714 { 2715 u8 val; 2716 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 2717 | SDHCI_WAKE_ON_INT; 2718 2719 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 2720 val |= mask ; 2721 /* Avoid fake wake up */ 2722 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2723 val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE); 2724 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 2725 } 2726 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); 2727 2728 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 2729 { 2730 u8 val; 2731 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 2732 | SDHCI_WAKE_ON_INT; 2733 2734 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 2735 val &= ~mask; 2736 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 2737 } 2738 2739 int sdhci_suspend_host(struct sdhci_host *host) 2740 { 2741 sdhci_disable_card_detection(host); 2742 2743 /* Disable tuning since we are suspending */ 2744 if (host->flags & SDHCI_USING_RETUNING_TIMER) { 2745 del_timer_sync(&host->tuning_timer); 2746 host->flags &= ~SDHCI_NEEDS_RETUNING; 2747 } 2748 2749 if (!device_may_wakeup(mmc_dev(host->mmc))) { 2750 host->ier = 0; 2751 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 2752 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 2753 free_irq(host->irq, host); 2754 } else { 2755 sdhci_enable_irq_wakeups(host); 2756 enable_irq_wake(host->irq); 2757 } 2758 return 0; 2759 } 2760 2761 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 2762 2763 int sdhci_resume_host(struct sdhci_host *host) 2764 { 2765 int ret = 0; 2766 2767 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 2768 if (host->ops->enable_dma) 2769 host->ops->enable_dma(host); 2770 } 2771 2772 if (!device_may_wakeup(mmc_dev(host->mmc))) { 2773 ret = request_threaded_irq(host->irq, sdhci_irq, 2774 sdhci_thread_irq, IRQF_SHARED, 2775 mmc_hostname(host->mmc), host); 2776 if (ret) 2777 return ret; 2778 } else { 2779 sdhci_disable_irq_wakeups(host); 2780 disable_irq_wake(host->irq); 2781 } 2782 2783 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && 2784 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 2785 /* Card keeps power but host controller does not */ 2786 sdhci_init(host, 0); 2787 host->pwr = 0; 2788 host->clock = 0; 2789 sdhci_do_set_ios(host, &host->mmc->ios); 2790 } else { 2791 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); 2792 mmiowb(); 2793 } 2794 2795 sdhci_enable_card_detection(host); 2796 2797 /* Set the re-tuning expiration flag */ 2798 if (host->flags & SDHCI_USING_RETUNING_TIMER) 2799 host->flags |= SDHCI_NEEDS_RETUNING; 2800 2801 return ret; 2802 } 2803 2804 EXPORT_SYMBOL_GPL(sdhci_resume_host); 2805 2806 static int sdhci_runtime_pm_get(struct sdhci_host *host) 2807 { 2808 return pm_runtime_get_sync(host->mmc->parent); 2809 } 2810 2811 static int sdhci_runtime_pm_put(struct sdhci_host *host) 2812 { 2813 pm_runtime_mark_last_busy(host->mmc->parent); 2814 return pm_runtime_put_autosuspend(host->mmc->parent); 2815 } 2816 2817 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) 2818 { 2819 if (host->runtime_suspended || host->bus_on) 2820 return; 2821 host->bus_on = true; 2822 pm_runtime_get_noresume(host->mmc->parent); 2823 } 2824 2825 static void sdhci_runtime_pm_bus_off(struct sdhci_host 
*host) 2826 { 2827 if (host->runtime_suspended || !host->bus_on) 2828 return; 2829 host->bus_on = false; 2830 pm_runtime_put_noidle(host->mmc->parent); 2831 } 2832 2833 int sdhci_runtime_suspend_host(struct sdhci_host *host) 2834 { 2835 unsigned long flags; 2836 2837 /* Disable tuning since we are suspending */ 2838 if (host->flags & SDHCI_USING_RETUNING_TIMER) { 2839 del_timer_sync(&host->tuning_timer); 2840 host->flags &= ~SDHCI_NEEDS_RETUNING; 2841 } 2842 2843 spin_lock_irqsave(&host->lock, flags); 2844 host->ier &= SDHCI_INT_CARD_INT; 2845 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2846 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2847 spin_unlock_irqrestore(&host->lock, flags); 2848 2849 synchronize_hardirq(host->irq); 2850 2851 spin_lock_irqsave(&host->lock, flags); 2852 host->runtime_suspended = true; 2853 spin_unlock_irqrestore(&host->lock, flags); 2854 2855 return 0; 2856 } 2857 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 2858 2859 int sdhci_runtime_resume_host(struct sdhci_host *host) 2860 { 2861 unsigned long flags; 2862 int host_flags = host->flags; 2863 2864 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 2865 if (host->ops->enable_dma) 2866 host->ops->enable_dma(host); 2867 } 2868 2869 sdhci_init(host, 0); 2870 2871 /* Force clock and power re-program */ 2872 host->pwr = 0; 2873 host->clock = 0; 2874 sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios); 2875 sdhci_do_set_ios(host, &host->mmc->ios); 2876 2877 if ((host_flags & SDHCI_PV_ENABLED) && 2878 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { 2879 spin_lock_irqsave(&host->lock, flags); 2880 sdhci_enable_preset_value(host, true); 2881 spin_unlock_irqrestore(&host->lock, flags); 2882 } 2883 2884 /* Set the re-tuning expiration flag */ 2885 if (host->flags & SDHCI_USING_RETUNING_TIMER) 2886 host->flags |= SDHCI_NEEDS_RETUNING; 2887 2888 spin_lock_irqsave(&host->lock, flags); 2889 2890 host->runtime_suspended = false; 2891 2892 /* Enable SDIO IRQ */ 2893 if (host->flags & SDHCI_SDIO_IRQ_ENABLED) 2894 sdhci_enable_sdio_irq_nolock(host, true); 2895 2896 /* Enable Card Detection */ 2897 sdhci_enable_card_detection(host); 2898 2899 spin_unlock_irqrestore(&host->lock, flags); 2900 2901 return 0; 2902 } 2903 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 2904 2905 #endif /* CONFIG_PM */ 2906 2907 /*****************************************************************************\ 2908 * * 2909 * Device allocation/registration * 2910 * * 2911 \*****************************************************************************/ 2912 2913 struct sdhci_host *sdhci_alloc_host(struct device *dev, 2914 size_t priv_size) 2915 { 2916 struct mmc_host *mmc; 2917 struct sdhci_host *host; 2918 2919 WARN_ON(dev == NULL); 2920 2921 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 2922 if (!mmc) 2923 return ERR_PTR(-ENOMEM); 2924 2925 host = mmc_priv(mmc); 2926 host->mmc = mmc; 2927 2928 return host; 2929 } 2930 2931 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 2932 2933 int sdhci_add_host(struct sdhci_host *host) 2934 { 2935 struct mmc_host *mmc; 2936 u32 caps[2] = {0, 0}; 2937 u32 max_current_caps; 2938 unsigned int ocr_avail; 2939 unsigned int override_timeout_clk; 2940 int ret; 2941 2942 WARN_ON(host == NULL); 2943 if (host == NULL) 2944 return -EINVAL; 2945 2946 mmc = host->mmc; 2947 2948 if (debug_quirks) 2949 host->quirks = debug_quirks; 2950 if (debug_quirks2) 2951 host->quirks2 = debug_quirks2; 2952 2953 override_timeout_clk = host->timeout_clk; 2954 2955 sdhci_do_reset(host, SDHCI_RESET_ALL); 2956 2957 
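/*
 * Hedged sketch (all names below are hypothetical, not from this file):
 * a platform driver typically reaches this function as
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(*priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->ioaddr = ...;
 *	host->irq = ...;
 *	host->ops = &my_sdhci_ops;
 *	ret = sdhci_add_host(host);
 *
 * and tears down with sdhci_remove_host() followed by
 * sdhci_free_host().
 */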
host->version = sdhci_readw(host, SDHCI_HOST_VERSION); 2958 host->version = (host->version & SDHCI_SPEC_VER_MASK) 2959 >> SDHCI_SPEC_VER_SHIFT; 2960 if (host->version > SDHCI_SPEC_300) { 2961 pr_err("%s: Unknown controller version (%d). " 2962 "You may experience problems.\n", mmc_hostname(mmc), 2963 host->version); 2964 } 2965 2966 caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : 2967 sdhci_readl(host, SDHCI_CAPABILITIES); 2968 2969 if (host->version >= SDHCI_SPEC_300) 2970 caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? 2971 host->caps1 : 2972 sdhci_readl(host, SDHCI_CAPABILITIES_1); 2973 2974 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 2975 host->flags |= SDHCI_USE_SDMA; 2976 else if (!(caps[0] & SDHCI_CAN_DO_SDMA)) 2977 DBG("Controller doesn't have SDMA capability\n"); 2978 else 2979 host->flags |= SDHCI_USE_SDMA; 2980 2981 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 2982 (host->flags & SDHCI_USE_SDMA)) { 2983 DBG("Disabling DMA as it is marked broken\n"); 2984 host->flags &= ~SDHCI_USE_SDMA; 2985 } 2986 2987 if ((host->version >= SDHCI_SPEC_200) && 2988 (caps[0] & SDHCI_CAN_DO_ADMA2)) 2989 host->flags |= SDHCI_USE_ADMA; 2990 2991 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 2992 (host->flags & SDHCI_USE_ADMA)) { 2993 DBG("Disabling ADMA as it is marked broken\n"); 2994 host->flags &= ~SDHCI_USE_ADMA; 2995 } 2996 2997 /* 2998 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask 2999 * and *must* do 64-bit DMA. A driver has the opportunity to change 3000 * that during the first call to ->enable_dma(). Similarly 3001 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to 3002 * implement. 3003 */ 3004 if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) 3005 host->flags |= SDHCI_USE_64_BIT_DMA; 3006 3007 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3008 if (host->ops->enable_dma) { 3009 if (host->ops->enable_dma(host)) { 3010 pr_warn("%s: No suitable DMA available - falling back to PIO\n", 3011 mmc_hostname(mmc)); 3012 host->flags &= 3013 ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 3014 } 3015 } 3016 } 3017 3018 /* SDMA does not support 64-bit DMA */ 3019 if (host->flags & SDHCI_USE_64_BIT_DMA) 3020 host->flags &= ~SDHCI_USE_SDMA; 3021 3022 if (host->flags & SDHCI_USE_ADMA) { 3023 /* 3024 * The DMA descriptor table size is calculated as the maximum 3025 * number of segments times 2, to allow for an alignment 3026 * descriptor for each segment, plus 1 for a nop end descriptor, 3027 * all multiplied by the descriptor size. 
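 *
 * Worked example (hedged; assumes SDHCI_MAX_SEGS is 128 and the
 * descriptor sizes below are 12 and 8 bytes): the 64-bit table is
 * (128 * 2 + 1) * 12 = 3084 bytes and the 32-bit table is
 * (128 * 2 + 1) * 8 = 2056 bytes.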
3028 */ 3029 if (host->flags & SDHCI_USE_64_BIT_DMA) { 3030 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * 3031 SDHCI_ADMA2_64_DESC_SZ; 3032 host->align_buffer_sz = SDHCI_MAX_SEGS * 3033 SDHCI_ADMA2_64_ALIGN; 3034 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ; 3035 host->align_sz = SDHCI_ADMA2_64_ALIGN; 3036 host->align_mask = SDHCI_ADMA2_64_ALIGN - 1; 3037 } else { 3038 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * 3039 SDHCI_ADMA2_32_DESC_SZ; 3040 host->align_buffer_sz = SDHCI_MAX_SEGS * 3041 SDHCI_ADMA2_32_ALIGN; 3042 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; 3043 host->align_sz = SDHCI_ADMA2_32_ALIGN; 3044 host->align_mask = SDHCI_ADMA2_32_ALIGN - 1; 3045 } 3046 host->adma_table = dma_alloc_coherent(mmc_dev(mmc), 3047 host->adma_table_sz, 3048 &host->adma_addr, 3049 GFP_KERNEL); 3050 host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); 3051 if (!host->adma_table || !host->align_buffer) { 3052 dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, 3053 host->adma_table, host->adma_addr); 3054 kfree(host->align_buffer); 3055 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 3056 mmc_hostname(mmc)); 3057 host->flags &= ~SDHCI_USE_ADMA; 3058 host->adma_table = NULL; 3059 host->align_buffer = NULL; 3060 } else if (host->adma_addr & host->align_mask) { 3061 pr_warn("%s: unable to allocate aligned ADMA descriptor\n", 3062 mmc_hostname(mmc)); 3063 host->flags &= ~SDHCI_USE_ADMA; 3064 dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, 3065 host->adma_table, host->adma_addr); 3066 kfree(host->align_buffer); 3067 host->adma_table = NULL; 3068 host->align_buffer = NULL; 3069 } 3070 } 3071 3072 /* 3073 * If we use DMA, then it's up to the caller to set the DMA 3074 * mask, but PIO does not need the hw shim so we set a new 3075 * mask here in that case. 3076 */ 3077 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { 3078 host->dma_mask = DMA_BIT_MASK(64); 3079 mmc_dev(mmc)->dma_mask = &host->dma_mask; 3080 } 3081 3082 if (host->version >= SDHCI_SPEC_300) 3083 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK) 3084 >> SDHCI_CLOCK_BASE_SHIFT; 3085 else 3086 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK) 3087 >> SDHCI_CLOCK_BASE_SHIFT; 3088 3089 host->max_clk *= 1000000; 3090 if (host->max_clk == 0 || host->quirks & 3091 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 3092 if (!host->ops->get_max_clock) { 3093 pr_err("%s: Hardware doesn't specify base clock " 3094 "frequency.\n", mmc_hostname(mmc)); 3095 return -ENODEV; 3096 } 3097 host->max_clk = host->ops->get_max_clock(host); 3098 } 3099 3100 host->next_data.cookie = 1; 3101 /* 3102 * In case of Host Controller v3.00, find out whether clock 3103 * multiplier is supported. 3104 */ 3105 host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >> 3106 SDHCI_CLOCK_MUL_SHIFT; 3107 3108 /* 3109 * In case the value in Clock Multiplier is 0, then programmable 3110 * clock mode is not supported, otherwise the actual clock 3111 * multiplier is one more than the value of Clock Multiplier 3112 * in the Capabilities Register. 3113 */ 3114 if (host->clk_mul) 3115 host->clk_mul += 1; 3116 3117 /* 3118 * Set host parameters. 
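 *
 * (Worked example, hedged: with a 200 MHz base clock and clk_mul of 0,
 * the v3.00 minimum below is 200000000 / SDHCI_MAX_DIV_SPEC_300, i.e.
 * roughly 97.8 kHz assuming that divisor limit is 2046; with a
 * non-zero clk_mul, f_max is raised to max_clk * clk_mul instead.)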
3119 */ 3120 mmc->ops = &sdhci_ops; 3121 mmc->f_max = host->max_clk; 3122 if (host->ops->get_min_clock) 3123 mmc->f_min = host->ops->get_min_clock(host); 3124 else if (host->version >= SDHCI_SPEC_300) { 3125 if (host->clk_mul) { 3126 mmc->f_min = (host->max_clk * host->clk_mul) / 1024; 3127 mmc->f_max = host->max_clk * host->clk_mul; 3128 } else 3129 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 3130 } else 3131 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 3132 3133 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 3134 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> 3135 SDHCI_TIMEOUT_CLK_SHIFT; 3136 if (host->timeout_clk == 0) { 3137 if (host->ops->get_timeout_clock) { 3138 host->timeout_clk = 3139 host->ops->get_timeout_clock(host); 3140 } else { 3141 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n", 3142 mmc_hostname(mmc)); 3143 return -ENODEV; 3144 } 3145 } 3146 3147 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT) 3148 host->timeout_clk *= 1000; 3149 3150 mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 3151 host->ops->get_max_timeout_count(host) : 1 << 27; 3152 mmc->max_busy_timeout /= host->timeout_clk; 3153 } 3154 3155 if (override_timeout_clk) 3156 host->timeout_clk = override_timeout_clk; 3157 3158 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; 3159 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 3160 3161 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 3162 host->flags |= SDHCI_AUTO_CMD12; 3163 3164 /* Auto-CMD23 stuff only works in ADMA or PIO. */ 3165 if ((host->version >= SDHCI_SPEC_300) && 3166 ((host->flags & SDHCI_USE_ADMA) || 3167 !(host->flags & SDHCI_USE_SDMA))) { 3168 host->flags |= SDHCI_AUTO_CMD23; 3169 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc)); 3170 } else { 3171 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc)); 3172 } 3173 3174 /* 3175 * A controller may support 8-bit width, but the board itself 3176 * might not have the pins brought out. Boards that support 3177 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 3178 * their platform code before calling sdhci_add_host(), and we 3179 * won't assume 8-bit width for hosts without that CAP. 3180 */ 3181 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 3182 mmc->caps |= MMC_CAP_4_BIT_DATA; 3183 3184 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) 3185 mmc->caps &= ~MMC_CAP_CMD23; 3186 3187 if (caps[0] & SDHCI_CAN_DO_HISPD) 3188 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 3189 3190 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3191 !(mmc->caps & MMC_CAP_NONREMOVABLE)) 3192 mmc->caps |= MMC_CAP_NEEDS_POLL; 3193 3194 /* If there are external regulators, get them */ 3195 if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER) 3196 return -EPROBE_DEFER; 3197 3198 /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */ 3199 if (!IS_ERR(mmc->supply.vqmmc)) { 3200 ret = regulator_enable(mmc->supply.vqmmc); 3201 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, 3202 1950000)) 3203 caps[1] &= ~(SDHCI_SUPPORT_SDR104 | 3204 SDHCI_SUPPORT_SDR50 | 3205 SDHCI_SUPPORT_DDR50); 3206 if (ret) { 3207 pr_warn("%s: Failed to enable vqmmc regulator: %d\n", 3208 mmc_hostname(mmc), ret); 3209 mmc->supply.vqmmc = ERR_PTR(-EINVAL); 3210 } 3211 } 3212 3213 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) 3214 caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 3215 SDHCI_SUPPORT_DDR50); 3216 3217 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. 
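 * (Hedged note: SDR12 and SDR25 are the mandatory UHS-I baseline, so
 * advertising SDR104, SDR50 or DDR50 implies both, which is what the
 * unconditional |= below implements.)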
*/ 3218 if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 3219 SDHCI_SUPPORT_DDR50)) 3220 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; 3221 3222 /* SDR104 support also implies SDR50 support */ 3223 if (caps[1] & SDHCI_SUPPORT_SDR104) { 3224 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; 3225 /* SD3.0: SDR104 is supported so (for eMMC) the caps2 3226 * field can be promoted to support HS200. 3227 */ 3228 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) 3229 mmc->caps2 |= MMC_CAP2_HS200; 3230 } else if (caps[1] & SDHCI_SUPPORT_SDR50) 3231 mmc->caps |= MMC_CAP_UHS_SDR50; 3232 3233 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 && 3234 (caps[1] & SDHCI_SUPPORT_HS400)) 3235 mmc->caps2 |= MMC_CAP2_HS400; 3236 3237 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) && 3238 (IS_ERR(mmc->supply.vqmmc) || 3239 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000, 3240 1300000))) 3241 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V; 3242 3243 if ((caps[1] & SDHCI_SUPPORT_DDR50) && 3244 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) 3245 mmc->caps |= MMC_CAP_UHS_DDR50; 3246 3247 /* Does the host need tuning for SDR50? */ 3248 if (caps[1] & SDHCI_USE_SDR50_TUNING) 3249 host->flags |= SDHCI_SDR50_NEEDS_TUNING; 3250 3251 /* Does the host need tuning for SDR104 / HS200? */ 3252 if (mmc->caps2 & MMC_CAP2_HS200) 3253 host->flags |= SDHCI_SDR104_NEEDS_TUNING; 3254 3255 /* Driver Type(s) (A, C, D) supported by the host */ 3256 if (caps[1] & SDHCI_DRIVER_TYPE_A) 3257 mmc->caps |= MMC_CAP_DRIVER_TYPE_A; 3258 if (caps[1] & SDHCI_DRIVER_TYPE_C) 3259 mmc->caps |= MMC_CAP_DRIVER_TYPE_C; 3260 if (caps[1] & SDHCI_DRIVER_TYPE_D) 3261 mmc->caps |= MMC_CAP_DRIVER_TYPE_D; 3262 3263 /* Initial value for re-tuning timer count */ 3264 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >> 3265 SDHCI_RETUNING_TIMER_COUNT_SHIFT; 3266 3267 /* 3268 * In case Re-tuning Timer is not disabled, the actual value of 3269 * re-tuning timer will be 2 ^ (n - 1). 3270 */ 3271 if (host->tuning_count) 3272 host->tuning_count = 1 << (host->tuning_count - 1); 3273 3274 /* Re-tuning mode supported by the Host Controller */ 3275 host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >> 3276 SDHCI_RETUNING_MODE_SHIFT; 3277 3278 ocr_avail = 0; 3279 3280 /* 3281 * According to SD Host Controller spec v3.00, if the Host System 3282 * can afford more than 150mA, the Host Driver should set XPC to 1. Also 3283 * the value is meaningful only if Voltage Support in the Capabilities 3284 * register is set. The actual current value is 4 times the register 3285 * value. 
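 *
 * Worked example (hedged): a Maximum Current field of 50 encodes
 * 50 * SDHCI_MAX_CURRENT_MULTIPLIER = 200 mA; the regulator fallback
 * below performs the inverse conversion from microamps.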
3286 */ 3287 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); 3288 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) { 3289 int curr = regulator_get_current_limit(mmc->supply.vmmc); 3290 if (curr > 0) { 3291 3292 /* convert to SDHCI_MAX_CURRENT format */ 3293 curr = curr/1000; /* convert to mA */ 3294 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; 3295 3296 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); 3297 max_current_caps = 3298 (curr << SDHCI_MAX_CURRENT_330_SHIFT) | 3299 (curr << SDHCI_MAX_CURRENT_300_SHIFT) | 3300 (curr << SDHCI_MAX_CURRENT_180_SHIFT); 3301 } 3302 } 3303 3304 if (caps[0] & SDHCI_CAN_VDD_330) { 3305 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; 3306 3307 mmc->max_current_330 = ((max_current_caps & 3308 SDHCI_MAX_CURRENT_330_MASK) >> 3309 SDHCI_MAX_CURRENT_330_SHIFT) * 3310 SDHCI_MAX_CURRENT_MULTIPLIER; 3311 } 3312 if (caps[0] & SDHCI_CAN_VDD_300) { 3313 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; 3314 3315 mmc->max_current_300 = ((max_current_caps & 3316 SDHCI_MAX_CURRENT_300_MASK) >> 3317 SDHCI_MAX_CURRENT_300_SHIFT) * 3318 SDHCI_MAX_CURRENT_MULTIPLIER; 3319 } 3320 if (caps[0] & SDHCI_CAN_VDD_180) { 3321 ocr_avail |= MMC_VDD_165_195; 3322 3323 mmc->max_current_180 = ((max_current_caps & 3324 SDHCI_MAX_CURRENT_180_MASK) >> 3325 SDHCI_MAX_CURRENT_180_SHIFT) * 3326 SDHCI_MAX_CURRENT_MULTIPLIER; 3327 } 3328 3329 /* If OCR is set by external regulators, use it instead */ 3330 if (mmc->ocr_avail) 3331 ocr_avail = mmc->ocr_avail; 3332 3333 if (host->ocr_mask) 3334 ocr_avail &= host->ocr_mask; 3335 3336 mmc->ocr_avail = ocr_avail; 3337 mmc->ocr_avail_sdio = ocr_avail; 3338 if (host->ocr_avail_sdio) 3339 mmc->ocr_avail_sdio &= host->ocr_avail_sdio; 3340 mmc->ocr_avail_sd = ocr_avail; 3341 if (host->ocr_avail_sd) 3342 mmc->ocr_avail_sd &= host->ocr_avail_sd; 3343 else /* normal SD controllers don't support 1.8V */ 3344 mmc->ocr_avail_sd &= ~MMC_VDD_165_195; 3345 mmc->ocr_avail_mmc = ocr_avail; 3346 if (host->ocr_avail_mmc) 3347 mmc->ocr_avail_mmc &= host->ocr_avail_mmc; 3348 3349 if (mmc->ocr_avail == 0) { 3350 pr_err("%s: Hardware doesn't report any " 3351 "supported voltages.\n", mmc_hostname(mmc)); 3352 return -ENODEV; 3353 } 3354 3355 spin_lock_init(&host->lock); 3356 3357 /* 3358 * Maximum number of segments. Depends on whether the hardware 3359 * can do scatter/gather. 3360 */ 3361 if (host->flags & SDHCI_USE_ADMA) 3362 mmc->max_segs = SDHCI_MAX_SEGS; 3363 else if (host->flags & SDHCI_USE_SDMA) 3364 mmc->max_segs = 1; 3365 else /* PIO */ 3366 mmc->max_segs = SDHCI_MAX_SEGS; 3367 3368 /* 3369 * Maximum number of sectors in one transfer. Limited by SDMA boundary 3370 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this 3371 * is less anyway. 3372 */ 3373 mmc->max_req_size = 524288; 3374 3375 /* 3376 * Maximum segment size. Could be one segment with the maximum number 3377 * of bytes. When doing hardware scatter/gather, each entry cannot 3378 * be larger than 64 KiB though. 3379 */ 3380 if (host->flags & SDHCI_USE_ADMA) { 3381 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) 3382 mmc->max_seg_size = 65535; 3383 else 3384 mmc->max_seg_size = 65536; 3385 } else { 3386 mmc->max_seg_size = mmc->max_req_size; 3387 } 3388 3389 /* 3390 * Maximum block size. This varies from controller to controller and 3391 * is specified in the capabilities register. 
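 *
 * (Hedged decode note: the field selects 512 << n, i.e. 0 -> 512,
 * 1 -> 1024, 2 -> 2048 bytes; 3 is reserved, which is why values >= 3
 * fall back to 512 below.)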
3392 */ 3393 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { 3394 mmc->max_blk_size = 2; 3395 } else { 3396 mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >> 3397 SDHCI_MAX_BLOCK_SHIFT; 3398 if (mmc->max_blk_size >= 3) { 3399 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n", 3400 mmc_hostname(mmc)); 3401 mmc->max_blk_size = 0; 3402 } 3403 } 3404 3405 mmc->max_blk_size = 512 << mmc->max_blk_size; 3406 3407 /* 3408 * Maximum block count. 3409 */ 3410 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; 3411 3412 /* 3413 * Init tasklets. 3414 */ 3415 tasklet_init(&host->finish_tasklet, 3416 sdhci_tasklet_finish, (unsigned long)host); 3417 3418 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host); 3419 3420 init_waitqueue_head(&host->buf_ready_int); 3421 3422 if (host->version >= SDHCI_SPEC_300) { 3423 /* Initialize re-tuning timer */ 3424 init_timer(&host->tuning_timer); 3425 host->tuning_timer.data = (unsigned long)host; 3426 host->tuning_timer.function = sdhci_tuning_timer; 3427 } 3428 3429 sdhci_init(host, 0); 3430 3431 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, 3432 IRQF_SHARED, mmc_hostname(mmc), host); 3433 if (ret) { 3434 pr_err("%s: Failed to request IRQ %d: %d\n", 3435 mmc_hostname(mmc), host->irq, ret); 3436 goto untasklet; 3437 } 3438 3439 #ifdef CONFIG_MMC_DEBUG 3440 sdhci_dumpregs(host); 3441 #endif 3442 3443 #ifdef SDHCI_USE_LEDS_CLASS 3444 snprintf(host->led_name, sizeof(host->led_name), 3445 "%s::", mmc_hostname(mmc)); 3446 host->led.name = host->led_name; 3447 host->led.brightness = LED_OFF; 3448 host->led.default_trigger = mmc_hostname(mmc); 3449 host->led.brightness_set = sdhci_led_control; 3450 3451 ret = led_classdev_register(mmc_dev(mmc), &host->led); 3452 if (ret) { 3453 pr_err("%s: Failed to register LED device: %d\n", 3454 mmc_hostname(mmc), ret); 3455 goto reset; 3456 } 3457 #endif 3458 3459 mmiowb(); 3460 3461 mmc_add_host(mmc); 3462 3463 pr_info("%s: SDHCI controller on %s [%s] using %s\n", 3464 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), 3465 (host->flags & SDHCI_USE_ADMA) ? 3466 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : 3467 (host->flags & SDHCI_USE_SDMA) ? 
"DMA" : "PIO"); 3468 3469 sdhci_enable_card_detection(host); 3470 3471 return 0; 3472 3473 #ifdef SDHCI_USE_LEDS_CLASS 3474 reset: 3475 sdhci_do_reset(host, SDHCI_RESET_ALL); 3476 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3477 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3478 free_irq(host->irq, host); 3479 #endif 3480 untasklet: 3481 tasklet_kill(&host->finish_tasklet); 3482 3483 return ret; 3484 } 3485 3486 EXPORT_SYMBOL_GPL(sdhci_add_host); 3487 3488 void sdhci_remove_host(struct sdhci_host *host, int dead) 3489 { 3490 struct mmc_host *mmc = host->mmc; 3491 unsigned long flags; 3492 3493 if (dead) { 3494 spin_lock_irqsave(&host->lock, flags); 3495 3496 host->flags |= SDHCI_DEVICE_DEAD; 3497 3498 if (host->mrq) { 3499 pr_err("%s: Controller removed during " 3500 " transfer!\n", mmc_hostname(mmc)); 3501 3502 host->mrq->cmd->error = -ENOMEDIUM; 3503 tasklet_schedule(&host->finish_tasklet); 3504 } 3505 3506 spin_unlock_irqrestore(&host->lock, flags); 3507 } 3508 3509 sdhci_disable_card_detection(host); 3510 3511 mmc_remove_host(mmc); 3512 3513 #ifdef SDHCI_USE_LEDS_CLASS 3514 led_classdev_unregister(&host->led); 3515 #endif 3516 3517 if (!dead) 3518 sdhci_do_reset(host, SDHCI_RESET_ALL); 3519 3520 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3521 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3522 free_irq(host->irq, host); 3523 3524 del_timer_sync(&host->timer); 3525 3526 tasklet_kill(&host->finish_tasklet); 3527 3528 if (!IS_ERR(mmc->supply.vqmmc)) 3529 regulator_disable(mmc->supply.vqmmc); 3530 3531 if (host->adma_table) 3532 dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, 3533 host->adma_table, host->adma_addr); 3534 kfree(host->align_buffer); 3535 3536 host->adma_table = NULL; 3537 host->align_buffer = NULL; 3538 } 3539 3540 EXPORT_SYMBOL_GPL(sdhci_remove_host); 3541 3542 void sdhci_free_host(struct sdhci_host *host) 3543 { 3544 mmc_free_host(host->mmc); 3545 } 3546 3547 EXPORT_SYMBOL_GPL(sdhci_free_host); 3548 3549 /*****************************************************************************\ 3550 * * 3551 * Driver init/exit * 3552 * * 3553 \*****************************************************************************/ 3554 3555 static int __init sdhci_drv_init(void) 3556 { 3557 pr_info(DRIVER_NAME 3558 ": Secure Digital Host Controller Interface driver\n"); 3559 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 3560 3561 return 0; 3562 } 3563 3564 static void __exit sdhci_drv_exit(void) 3565 { 3566 } 3567 3568 module_init(sdhci_drv_init); 3569 module_exit(sdhci_drv_exit); 3570 3571 module_param(debug_quirks, uint, 0444); 3572 module_param(debug_quirks2, uint, 0444); 3573 3574 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 3575 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 3576 MODULE_LICENSE("GPL"); 3577 3578 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 3579 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 3580