1 /* 2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver 3 * 4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or (at 9 * your option) any later version. 10 * 11 * Thanks to the following companies for their support: 12 * 13 * - JMicron (hardware and technical support) 14 */ 15 16 #include <linux/delay.h> 17 #include <linux/highmem.h> 18 #include <linux/io.h> 19 #include <linux/module.h> 20 #include <linux/dma-mapping.h> 21 #include <linux/slab.h> 22 #include <linux/scatterlist.h> 23 #include <linux/regulator/consumer.h> 24 #include <linux/pm_runtime.h> 25 26 #include <linux/leds.h> 27 28 #include <linux/mmc/mmc.h> 29 #include <linux/mmc/host.h> 30 31 #include "sdhci.h" 32 33 #define DRIVER_NAME "sdhci" 34 35 #define DBG(f, x...) \ 36 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x) 37 38 #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \ 39 defined(CONFIG_MMC_SDHCI_MODULE)) 40 #define SDHCI_USE_LEDS_CLASS 41 #endif 42 43 #define MAX_TUNING_LOOP 40 44 45 static unsigned int debug_quirks = 0; 46 static unsigned int debug_quirks2; 47 48 static void sdhci_finish_data(struct sdhci_host *); 49 50 static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); 51 static void sdhci_finish_command(struct sdhci_host *); 52 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); 53 static void sdhci_tuning_timer(unsigned long data); 54 55 #ifdef CONFIG_PM_RUNTIME 56 static int sdhci_runtime_pm_get(struct sdhci_host *host); 57 static int sdhci_runtime_pm_put(struct sdhci_host *host); 58 #else 59 static inline int sdhci_runtime_pm_get(struct sdhci_host *host) 60 { 61 return 0; 62 } 63 static inline int sdhci_runtime_pm_put(struct sdhci_host *host) 64 { 65 return 0; 66 } 67 #endif 68 69 static void sdhci_dumpregs(struct sdhci_host *host) 70 { 71 pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n", 72 mmc_hostname(host->mmc)); 73 74 pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n", 75 sdhci_readl(host, SDHCI_DMA_ADDRESS), 76 sdhci_readw(host, SDHCI_HOST_VERSION)); 77 pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n", 78 sdhci_readw(host, SDHCI_BLOCK_SIZE), 79 sdhci_readw(host, SDHCI_BLOCK_COUNT)); 80 pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n", 81 sdhci_readl(host, SDHCI_ARGUMENT), 82 sdhci_readw(host, SDHCI_TRANSFER_MODE)); 83 pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n", 84 sdhci_readl(host, SDHCI_PRESENT_STATE), 85 sdhci_readb(host, SDHCI_HOST_CONTROL)); 86 pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n", 87 sdhci_readb(host, SDHCI_POWER_CONTROL), 88 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); 89 pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n", 90 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), 91 sdhci_readw(host, SDHCI_CLOCK_CONTROL)); 92 pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n", 93 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), 94 sdhci_readl(host, SDHCI_INT_STATUS)); 95 pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n", 96 sdhci_readl(host, SDHCI_INT_ENABLE), 97 sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); 98 pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", 99 sdhci_readw(host, SDHCI_ACMD12_ERR), 100 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 101 pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n", 102 sdhci_readl(host, SDHCI_CAPABILITIES), 103 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 104 pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", 105 sdhci_readw(host, SDHCI_COMMAND), 106 sdhci_readl(host, SDHCI_MAX_CURRENT)); 107 pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n", 108 sdhci_readw(host, SDHCI_HOST_CONTROL2)); 109 110 if (host->flags & SDHCI_USE_ADMA) 111 pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 112 readl(host->ioaddr + SDHCI_ADMA_ERROR), 113 readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); 114 115 pr_debug(DRIVER_NAME ": ===========================================\n"); 116 } 117 118 /*****************************************************************************\ 119 * * 120 * Low level functions * 121 * * 122 \*****************************************************************************/ 123 124 static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set) 125 { 126 u32 ier; 127 128 ier = sdhci_readl(host, SDHCI_INT_ENABLE); 129 ier &= ~clear; 130 ier |= set; 131 sdhci_writel(host, ier, SDHCI_INT_ENABLE); 132 sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE); 133 } 134 135 static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs) 136 { 137 sdhci_clear_set_irqs(host, 0, irqs); 138 } 139 140 static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs) 141 { 142 sdhci_clear_set_irqs(host, irqs, 0); 143 } 144 145 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) 146 { 147 u32 present, irqs; 148 149 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 150 (host->mmc->caps & MMC_CAP_NONREMOVABLE)) 151 return; 152 153 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 154 SDHCI_CARD_PRESENT; 155 irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT; 156 157 if (enable) 158 sdhci_unmask_irqs(host, irqs); 159 else 160 sdhci_mask_irqs(host, irqs); 161 } 162 163 static void sdhci_enable_card_detection(struct sdhci_host *host) 164 { 165 sdhci_set_card_detection(host, true); 166 } 167 168 static void sdhci_disable_card_detection(struct sdhci_host *host) 169 { 170 sdhci_set_card_detection(host, false); 171 } 172 173 static void sdhci_reset(struct sdhci_host *host, u8 mask) 174 { 175 unsigned long timeout; 176 u32 uninitialized_var(ier); 177 178 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 179 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & 180 SDHCI_CARD_PRESENT)) 181 return; 182 } 183 184 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 185 ier = sdhci_readl(host, SDHCI_INT_ENABLE); 186 187 if (host->ops->platform_reset_enter) 188 host->ops->platform_reset_enter(host, mask); 189 190 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 191 192 if (mask & SDHCI_RESET_ALL) 193 host->clock = 0; 194 195 /* Wait max 100 ms */ 196 timeout = 100; 197 198 /* hw clears the bit when it's done */ 199 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) { 200 if (timeout == 0) { 201 pr_err("%s: Reset 0x%x never completed.\n", 202 mmc_hostname(host->mmc), (int)mask); 203 sdhci_dumpregs(host); 204 return; 205 } 206 timeout--; 207 mdelay(1); 208 } 209 210 if (host->ops->platform_reset_exit) 211 host->ops->platform_reset_exit(host, mask); 212 213 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 214 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); 215 216 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 217 if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL)) 218 host->ops->enable_dma(host); 219 } 220 } 221 222 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); 223 224 static void sdhci_init(struct sdhci_host *host, int soft) 225 { 226 if (soft) 227 sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA); 228 else 229 sdhci_reset(host, SDHCI_RESET_ALL); 230 231 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, 232 SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 233 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX | 234 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | 235 SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE); 236 237 if (soft) { 238 /* force clock reconfiguration */ 239 host->clock = 0; 240 sdhci_set_ios(host->mmc, &host->mmc->ios); 241 } 242 } 243 244 static void sdhci_reinit(struct sdhci_host *host) 245 { 246 sdhci_init(host, 0); 247 sdhci_enable_card_detection(host); 248 } 249 250 static void sdhci_activate_led(struct sdhci_host *host) 251 { 252 u8 ctrl; 253 254 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 255 ctrl |= SDHCI_CTRL_LED; 256 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 257 } 258 259 static void sdhci_deactivate_led(struct sdhci_host *host) 260 { 261 u8 ctrl; 262 263 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 264 ctrl &= ~SDHCI_CTRL_LED; 265 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 266 } 267 268 #ifdef SDHCI_USE_LEDS_CLASS 269 static void sdhci_led_control(struct led_classdev *led, 270 enum led_brightness brightness) 271 { 272 struct sdhci_host *host = container_of(led, struct sdhci_host, led); 273 unsigned long flags; 274 275 spin_lock_irqsave(&host->lock, flags); 276 277 if (host->runtime_suspended) 278 goto out; 279 280 if (brightness == LED_OFF) 281 sdhci_deactivate_led(host); 282 else 283 sdhci_activate_led(host); 284 out: 285 spin_unlock_irqrestore(&host->lock, flags); 286 } 287 #endif 288 289 /*****************************************************************************\ 290 * * 291 * Core functions * 292 * * 293 \*****************************************************************************/ 294 295 static void sdhci_read_block_pio(struct sdhci_host *host) 296 { 297 unsigned long flags; 298 size_t blksize, len, chunk; 299 u32 uninitialized_var(scratch); 300 u8 *buf; 301 302 DBG("PIO reading\n"); 303 304 blksize = host->data->blksz; 305 chunk = 0; 306 307 local_irq_save(flags); 308 309 while (blksize) { 310 if (!sg_miter_next(&host->sg_miter)) 311 BUG(); 312 313 len = min(host->sg_miter.length, blksize); 314 315 blksize -= len; 316 host->sg_miter.consumed = len; 317 318 buf = host->sg_miter.addr; 319 320 while (len) { 321 if (chunk == 0) { 322 scratch = sdhci_readl(host, SDHCI_BUFFER); 323 chunk = 4; 324 } 325 326 *buf = scratch & 0xFF; 327 328 buf++; 329 scratch >>= 8; 330 chunk--; 331 len--; 332 } 333 } 334 335 sg_miter_stop(&host->sg_miter); 336 337 local_irq_restore(flags); 338 } 339 340 static void sdhci_write_block_pio(struct sdhci_host *host) 341 { 342 unsigned long flags; 343 size_t blksize, len, chunk; 344 u32 scratch; 345 u8 *buf; 346 347 DBG("PIO writing\n"); 348 349 blksize = host->data->blksz; 350 chunk = 0; 351 scratch = 0; 352 353 local_irq_save(flags); 354 355 while (blksize) { 356 if (!sg_miter_next(&host->sg_miter)) 357 BUG(); 358 359 len = min(host->sg_miter.length, blksize); 360 361 blksize -= len; 362 host->sg_miter.consumed = len; 363 364 buf = host->sg_miter.addr; 365 366 while (len) { 367 scratch |= (u32)*buf << (chunk * 8); 368 369 buf++; 370 chunk++; 371 len--; 372 373 if ((chunk == 4) || ((len == 0) && (blksize == 0))) { 374 sdhci_writel(host, scratch, SDHCI_BUFFER); 375 chunk = 0; 376 scratch = 0; 377 } 378 } 379 } 380 381 sg_miter_stop(&host->sg_miter); 382 383 local_irq_restore(flags); 384 } 385 386 static void sdhci_transfer_pio(struct sdhci_host *host) 387 { 388 u32 mask; 389 390 BUG_ON(!host->data); 391 392 if (host->blocks == 0) 393 return; 394 395 if (host->data->flags & MMC_DATA_READ) 396 mask = SDHCI_DATA_AVAILABLE; 397 else 398 mask = SDHCI_SPACE_AVAILABLE; 399 400 /* 401 * Some controllers (JMicron JMB38x) mess up the buffer bits 402 * for transfers < 4 bytes. As long as it is just one block, 403 * we can ignore the bits. 404 */ 405 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && 406 (host->data->blocks == 1)) 407 mask = ~0; 408 409 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 410 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) 411 udelay(100); 412 413 if (host->data->flags & MMC_DATA_READ) 414 sdhci_read_block_pio(host); 415 else 416 sdhci_write_block_pio(host); 417 418 host->blocks--; 419 if (host->blocks == 0) 420 break; 421 } 422 423 DBG("PIO transfer complete.\n"); 424 } 425 426 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) 427 { 428 local_irq_save(*flags); 429 return kmap_atomic(sg_page(sg)) + sg->offset; 430 } 431 432 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) 433 { 434 kunmap_atomic(buffer); 435 local_irq_restore(*flags); 436 } 437 438 static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd) 439 { 440 __le32 *dataddr = (__le32 __force *)(desc + 4); 441 __le16 *cmdlen = (__le16 __force *)desc; 442 443 /* SDHCI specification says ADMA descriptors should be 4 byte 444 * aligned, so using 16 or 32bit operations should be safe. */ 445 446 cmdlen[0] = cpu_to_le16(cmd); 447 cmdlen[1] = cpu_to_le16(len); 448 449 dataddr[0] = cpu_to_le32(addr); 450 } 451 452 static int sdhci_adma_table_pre(struct sdhci_host *host, 453 struct mmc_data *data) 454 { 455 int direction; 456 457 u8 *desc; 458 u8 *align; 459 dma_addr_t addr; 460 dma_addr_t align_addr; 461 int len, offset; 462 463 struct scatterlist *sg; 464 int i; 465 char *buffer; 466 unsigned long flags; 467 468 /* 469 * The spec does not specify endianness of descriptor table. 470 * We currently guess that it is LE. 471 */ 472 473 if (data->flags & MMC_DATA_READ) 474 direction = DMA_FROM_DEVICE; 475 else 476 direction = DMA_TO_DEVICE; 477 478 /* 479 * The ADMA descriptor table is mapped further down as we 480 * need to fill it with data first. 481 */ 482 483 host->align_addr = dma_map_single(mmc_dev(host->mmc), 484 host->align_buffer, 128 * 4, direction); 485 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) 486 goto fail; 487 BUG_ON(host->align_addr & 0x3); 488 489 host->sg_count = dma_map_sg(mmc_dev(host->mmc), 490 data->sg, data->sg_len, direction); 491 if (host->sg_count == 0) 492 goto unmap_align; 493 494 desc = host->adma_desc; 495 align = host->align_buffer; 496 497 align_addr = host->align_addr; 498 499 for_each_sg(data->sg, sg, host->sg_count, i) { 500 addr = sg_dma_address(sg); 501 len = sg_dma_len(sg); 502 503 /* 504 * The SDHCI specification states that ADMA 505 * addresses must be 32-bit aligned. If they 506 * aren't, then we use a bounce buffer for 507 * the (up to three) bytes that screw up the 508 * alignment. 509 */ 510 offset = (4 - (addr & 0x3)) & 0x3; 511 if (offset) { 512 if (data->flags & MMC_DATA_WRITE) { 513 buffer = sdhci_kmap_atomic(sg, &flags); 514 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); 515 memcpy(align, buffer, offset); 516 sdhci_kunmap_atomic(buffer, &flags); 517 } 518 519 /* tran, valid */ 520 sdhci_set_adma_desc(desc, align_addr, offset, 0x21); 521 522 BUG_ON(offset > 65536); 523 524 align += 4; 525 align_addr += 4; 526 527 desc += 8; 528 529 addr += offset; 530 len -= offset; 531 } 532 533 BUG_ON(len > 65536); 534 535 /* tran, valid */ 536 sdhci_set_adma_desc(desc, addr, len, 0x21); 537 desc += 8; 538 539 /* 540 * If this triggers then we have a calculation bug 541 * somewhere. :/ 542 */ 543 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); 544 } 545 546 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { 547 /* 548 * Mark the last descriptor as the terminating descriptor 549 */ 550 if (desc != host->adma_desc) { 551 desc -= 8; 552 desc[0] |= 0x2; /* end */ 553 } 554 } else { 555 /* 556 * Add a terminating entry. 557 */ 558 559 /* nop, end, valid */ 560 sdhci_set_adma_desc(desc, 0, 0, 0x3); 561 } 562 563 /* 564 * Resync align buffer as we might have changed it. 565 */ 566 if (data->flags & MMC_DATA_WRITE) { 567 dma_sync_single_for_device(mmc_dev(host->mmc), 568 host->align_addr, 128 * 4, direction); 569 } 570 571 host->adma_addr = dma_map_single(mmc_dev(host->mmc), 572 host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); 573 if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr)) 574 goto unmap_entries; 575 BUG_ON(host->adma_addr & 0x3); 576 577 return 0; 578 579 unmap_entries: 580 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 581 data->sg_len, direction); 582 unmap_align: 583 dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 584 128 * 4, direction); 585 fail: 586 return -EINVAL; 587 } 588 589 static void sdhci_adma_table_post(struct sdhci_host *host, 590 struct mmc_data *data) 591 { 592 int direction; 593 594 struct scatterlist *sg; 595 int i, size; 596 u8 *align; 597 char *buffer; 598 unsigned long flags; 599 600 if (data->flags & MMC_DATA_READ) 601 direction = DMA_FROM_DEVICE; 602 else 603 direction = DMA_TO_DEVICE; 604 605 dma_unmap_single(mmc_dev(host->mmc), host->adma_addr, 606 (128 * 2 + 1) * 4, DMA_TO_DEVICE); 607 608 dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 609 128 * 4, direction); 610 611 if (data->flags & MMC_DATA_READ) { 612 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, 613 data->sg_len, direction); 614 615 align = host->align_buffer; 616 617 for_each_sg(data->sg, sg, host->sg_count, i) { 618 if (sg_dma_address(sg) & 0x3) { 619 size = 4 - (sg_dma_address(sg) & 0x3); 620 621 buffer = sdhci_kmap_atomic(sg, &flags); 622 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); 623 memcpy(buffer, align, size); 624 sdhci_kunmap_atomic(buffer, &flags); 625 626 align += 4; 627 } 628 } 629 } 630 631 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 632 data->sg_len, direction); 633 } 634 635 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) 636 { 637 u8 count; 638 struct mmc_data *data = cmd->data; 639 unsigned target_timeout, current_timeout; 640 641 /* 642 * If the host controller provides us with an incorrect timeout 643 * value, just skip the check and use 0xE. The hardware may take 644 * longer to time out, but that's much better than having a too-short 645 * timeout value. 646 */ 647 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) 648 return 0xE; 649 650 /* Unspecified timeout, assume max */ 651 if (!data && !cmd->cmd_timeout_ms) 652 return 0xE; 653 654 /* timeout in us */ 655 if (!data) 656 target_timeout = cmd->cmd_timeout_ms * 1000; 657 else { 658 target_timeout = data->timeout_ns / 1000; 659 if (host->clock) 660 target_timeout += data->timeout_clks / host->clock; 661 } 662 663 /* 664 * Figure out needed cycles. 665 * We do this in steps in order to fit inside a 32 bit int. 666 * The first step is the minimum timeout, which will have a 667 * minimum resolution of 6 bits: 668 * (1) 2^13*1000 > 2^22, 669 * (2) host->timeout_clk < 2^16 670 * => 671 * (1) / (2) > 2^6 672 */ 673 count = 0; 674 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 675 while (current_timeout < target_timeout) { 676 count++; 677 current_timeout <<= 1; 678 if (count >= 0xF) 679 break; 680 } 681 682 if (count >= 0xF) { 683 pr_warning("%s: Too large timeout requested for CMD%d!\n", 684 mmc_hostname(host->mmc), cmd->opcode); 685 count = 0xE; 686 } 687 688 return count; 689 } 690 691 static void sdhci_set_transfer_irqs(struct sdhci_host *host) 692 { 693 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; 694 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; 695 696 if (host->flags & SDHCI_REQ_USE_DMA) 697 sdhci_clear_set_irqs(host, pio_irqs, dma_irqs); 698 else 699 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs); 700 } 701 702 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) 703 { 704 u8 count; 705 u8 ctrl; 706 struct mmc_data *data = cmd->data; 707 int ret; 708 709 WARN_ON(host->data); 710 711 if (data || (cmd->flags & MMC_RSP_BUSY)) { 712 count = sdhci_calc_timeout(host, cmd); 713 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); 714 } 715 716 if (!data) 717 return; 718 719 /* Sanity checks */ 720 BUG_ON(data->blksz * data->blocks > 524288); 721 BUG_ON(data->blksz > host->mmc->max_blk_size); 722 BUG_ON(data->blocks > 65535); 723 724 host->data = data; 725 host->data_early = 0; 726 host->data->bytes_xfered = 0; 727 728 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) 729 host->flags |= SDHCI_REQ_USE_DMA; 730 731 /* 732 * FIXME: This doesn't account for merging when mapping the 733 * scatterlist. 734 */ 735 if (host->flags & SDHCI_REQ_USE_DMA) { 736 int broken, i; 737 struct scatterlist *sg; 738 739 broken = 0; 740 if (host->flags & SDHCI_USE_ADMA) { 741 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) 742 broken = 1; 743 } else { 744 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) 745 broken = 1; 746 } 747 748 if (unlikely(broken)) { 749 for_each_sg(data->sg, sg, data->sg_len, i) { 750 if (sg->length & 0x3) { 751 DBG("Reverting to PIO because of " 752 "transfer size (%d)\n", 753 sg->length); 754 host->flags &= ~SDHCI_REQ_USE_DMA; 755 break; 756 } 757 } 758 } 759 } 760 761 /* 762 * The assumption here being that alignment is the same after 763 * translation to device address space. 764 */ 765 if (host->flags & SDHCI_REQ_USE_DMA) { 766 int broken, i; 767 struct scatterlist *sg; 768 769 broken = 0; 770 if (host->flags & SDHCI_USE_ADMA) { 771 /* 772 * As we use 3 byte chunks to work around 773 * alignment problems, we need to check this 774 * quirk. 775 */ 776 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) 777 broken = 1; 778 } else { 779 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) 780 broken = 1; 781 } 782 783 if (unlikely(broken)) { 784 for_each_sg(data->sg, sg, data->sg_len, i) { 785 if (sg->offset & 0x3) { 786 DBG("Reverting to PIO because of " 787 "bad alignment\n"); 788 host->flags &= ~SDHCI_REQ_USE_DMA; 789 break; 790 } 791 } 792 } 793 } 794 795 if (host->flags & SDHCI_REQ_USE_DMA) { 796 if (host->flags & SDHCI_USE_ADMA) { 797 ret = sdhci_adma_table_pre(host, data); 798 if (ret) { 799 /* 800 * This only happens when someone fed 801 * us an invalid request. 802 */ 803 WARN_ON(1); 804 host->flags &= ~SDHCI_REQ_USE_DMA; 805 } else { 806 sdhci_writel(host, host->adma_addr, 807 SDHCI_ADMA_ADDRESS); 808 } 809 } else { 810 int sg_cnt; 811 812 sg_cnt = dma_map_sg(mmc_dev(host->mmc), 813 data->sg, data->sg_len, 814 (data->flags & MMC_DATA_READ) ? 815 DMA_FROM_DEVICE : 816 DMA_TO_DEVICE); 817 if (sg_cnt == 0) { 818 /* 819 * This only happens when someone fed 820 * us an invalid request. 821 */ 822 WARN_ON(1); 823 host->flags &= ~SDHCI_REQ_USE_DMA; 824 } else { 825 WARN_ON(sg_cnt != 1); 826 sdhci_writel(host, sg_dma_address(data->sg), 827 SDHCI_DMA_ADDRESS); 828 } 829 } 830 } 831 832 /* 833 * Always adjust the DMA selection as some controllers 834 * (e.g. JMicron) can't do PIO properly when the selection 835 * is ADMA. 836 */ 837 if (host->version >= SDHCI_SPEC_200) { 838 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 839 ctrl &= ~SDHCI_CTRL_DMA_MASK; 840 if ((host->flags & SDHCI_REQ_USE_DMA) && 841 (host->flags & SDHCI_USE_ADMA)) 842 ctrl |= SDHCI_CTRL_ADMA32; 843 else 844 ctrl |= SDHCI_CTRL_SDMA; 845 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 846 } 847 848 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 849 int flags; 850 851 flags = SG_MITER_ATOMIC; 852 if (host->data->flags & MMC_DATA_READ) 853 flags |= SG_MITER_TO_SG; 854 else 855 flags |= SG_MITER_FROM_SG; 856 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 857 host->blocks = data->blocks; 858 } 859 860 sdhci_set_transfer_irqs(host); 861 862 /* Set the DMA boundary value and block size */ 863 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, 864 data->blksz), SDHCI_BLOCK_SIZE); 865 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); 866 } 867 868 static void sdhci_set_transfer_mode(struct sdhci_host *host, 869 struct mmc_command *cmd) 870 { 871 u16 mode; 872 struct mmc_data *data = cmd->data; 873 874 if (data == NULL) 875 return; 876 877 WARN_ON(!host->data); 878 879 mode = SDHCI_TRNS_BLK_CNT_EN; 880 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) { 881 mode |= SDHCI_TRNS_MULTI; 882 /* 883 * If we are sending CMD23, CMD12 never gets sent 884 * on successful completion (so no Auto-CMD12). 885 */ 886 if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) 887 mode |= SDHCI_TRNS_AUTO_CMD12; 888 else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { 889 mode |= SDHCI_TRNS_AUTO_CMD23; 890 sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2); 891 } 892 } 893 894 if (data->flags & MMC_DATA_READ) 895 mode |= SDHCI_TRNS_READ; 896 if (host->flags & SDHCI_REQ_USE_DMA) 897 mode |= SDHCI_TRNS_DMA; 898 899 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); 900 } 901 902 static void sdhci_finish_data(struct sdhci_host *host) 903 { 904 struct mmc_data *data; 905 906 BUG_ON(!host->data); 907 908 data = host->data; 909 host->data = NULL; 910 911 if (host->flags & SDHCI_REQ_USE_DMA) { 912 if (host->flags & SDHCI_USE_ADMA) 913 sdhci_adma_table_post(host, data); 914 else { 915 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 916 data->sg_len, (data->flags & MMC_DATA_READ) ? 917 DMA_FROM_DEVICE : DMA_TO_DEVICE); 918 } 919 } 920 921 /* 922 * The specification states that the block count register must 923 * be updated, but it does not specify at what point in the 924 * data flow. That makes the register entirely useless to read 925 * back so we have to assume that nothing made it to the card 926 * in the event of an error. 927 */ 928 if (data->error) 929 data->bytes_xfered = 0; 930 else 931 data->bytes_xfered = data->blksz * data->blocks; 932 933 /* 934 * Need to send CMD12 if - 935 * a) open-ended multiblock transfer (no CMD23) 936 * b) error in multiblock transfer 937 */ 938 if (data->stop && 939 (data->error || 940 !host->mrq->sbc)) { 941 942 /* 943 * The controller needs a reset of internal state machines 944 * upon error conditions. 945 */ 946 if (data->error) { 947 sdhci_reset(host, SDHCI_RESET_CMD); 948 sdhci_reset(host, SDHCI_RESET_DATA); 949 } 950 951 sdhci_send_command(host, data->stop); 952 } else 953 tasklet_schedule(&host->finish_tasklet); 954 } 955 956 static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) 957 { 958 int flags; 959 u32 mask; 960 unsigned long timeout; 961 962 WARN_ON(host->cmd); 963 964 /* Wait max 10 ms */ 965 timeout = 10; 966 967 mask = SDHCI_CMD_INHIBIT; 968 if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY)) 969 mask |= SDHCI_DATA_INHIBIT; 970 971 /* We shouldn't wait for data inihibit for stop commands, even 972 though they might use busy signaling */ 973 if (host->mrq->data && (cmd == host->mrq->data->stop)) 974 mask &= ~SDHCI_DATA_INHIBIT; 975 976 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 977 if (timeout == 0) { 978 pr_err("%s: Controller never released " 979 "inhibit bit(s).\n", mmc_hostname(host->mmc)); 980 sdhci_dumpregs(host); 981 cmd->error = -EIO; 982 tasklet_schedule(&host->finish_tasklet); 983 return; 984 } 985 timeout--; 986 mdelay(1); 987 } 988 989 mod_timer(&host->timer, jiffies + 10 * HZ); 990 991 host->cmd = cmd; 992 993 sdhci_prepare_data(host, cmd); 994 995 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); 996 997 sdhci_set_transfer_mode(host, cmd); 998 999 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 1000 pr_err("%s: Unsupported response type!\n", 1001 mmc_hostname(host->mmc)); 1002 cmd->error = -EINVAL; 1003 tasklet_schedule(&host->finish_tasklet); 1004 return; 1005 } 1006 1007 if (!(cmd->flags & MMC_RSP_PRESENT)) 1008 flags = SDHCI_CMD_RESP_NONE; 1009 else if (cmd->flags & MMC_RSP_136) 1010 flags = SDHCI_CMD_RESP_LONG; 1011 else if (cmd->flags & MMC_RSP_BUSY) 1012 flags = SDHCI_CMD_RESP_SHORT_BUSY; 1013 else 1014 flags = SDHCI_CMD_RESP_SHORT; 1015 1016 if (cmd->flags & MMC_RSP_CRC) 1017 flags |= SDHCI_CMD_CRC; 1018 if (cmd->flags & MMC_RSP_OPCODE) 1019 flags |= SDHCI_CMD_INDEX; 1020 1021 /* CMD19 is special in that the Data Present Select should be set */ 1022 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK || 1023 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200) 1024 flags |= SDHCI_CMD_DATA; 1025 1026 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); 1027 } 1028 1029 static void sdhci_finish_command(struct sdhci_host *host) 1030 { 1031 int i; 1032 1033 BUG_ON(host->cmd == NULL); 1034 1035 if (host->cmd->flags & MMC_RSP_PRESENT) { 1036 if (host->cmd->flags & MMC_RSP_136) { 1037 /* CRC is stripped so we need to do some shifting. */ 1038 for (i = 0;i < 4;i++) { 1039 host->cmd->resp[i] = sdhci_readl(host, 1040 SDHCI_RESPONSE + (3-i)*4) << 8; 1041 if (i != 3) 1042 host->cmd->resp[i] |= 1043 sdhci_readb(host, 1044 SDHCI_RESPONSE + (3-i)*4-1); 1045 } 1046 } else { 1047 host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); 1048 } 1049 } 1050 1051 host->cmd->error = 0; 1052 1053 /* Finished CMD23, now send actual command. */ 1054 if (host->cmd == host->mrq->sbc) { 1055 host->cmd = NULL; 1056 sdhci_send_command(host, host->mrq->cmd); 1057 } else { 1058 1059 /* Processed actual command. */ 1060 if (host->data && host->data_early) 1061 sdhci_finish_data(host); 1062 1063 if (!host->cmd->data) 1064 tasklet_schedule(&host->finish_tasklet); 1065 1066 host->cmd = NULL; 1067 } 1068 } 1069 1070 static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 1071 { 1072 int div = 0; /* Initialized for compiler warning */ 1073 int real_div = div, clk_mul = 1; 1074 u16 clk = 0; 1075 unsigned long timeout; 1076 1077 if (clock && clock == host->clock) 1078 return; 1079 1080 host->mmc->actual_clock = 0; 1081 1082 if (host->ops->set_clock) { 1083 host->ops->set_clock(host, clock); 1084 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) 1085 return; 1086 } 1087 1088 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 1089 1090 if (clock == 0) 1091 goto out; 1092 1093 if (host->version >= SDHCI_SPEC_300) { 1094 /* 1095 * Check if the Host Controller supports Programmable Clock 1096 * Mode. 1097 */ 1098 if (host->clk_mul) { 1099 u16 ctrl; 1100 1101 /* 1102 * We need to figure out whether the Host Driver needs 1103 * to select Programmable Clock Mode, or the value can 1104 * be set automatically by the Host Controller based on 1105 * the Preset Value registers. 1106 */ 1107 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1108 if (!(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { 1109 for (div = 1; div <= 1024; div++) { 1110 if (((host->max_clk * host->clk_mul) / 1111 div) <= clock) 1112 break; 1113 } 1114 /* 1115 * Set Programmable Clock Mode in the Clock 1116 * Control register. 1117 */ 1118 clk = SDHCI_PROG_CLOCK_MODE; 1119 real_div = div; 1120 clk_mul = host->clk_mul; 1121 div--; 1122 } 1123 } else { 1124 /* Version 3.00 divisors must be a multiple of 2. */ 1125 if (host->max_clk <= clock) 1126 div = 1; 1127 else { 1128 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; 1129 div += 2) { 1130 if ((host->max_clk / div) <= clock) 1131 break; 1132 } 1133 } 1134 real_div = div; 1135 div >>= 1; 1136 } 1137 } else { 1138 /* Version 2.00 divisors must be a power of 2. */ 1139 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) { 1140 if ((host->max_clk / div) <= clock) 1141 break; 1142 } 1143 real_div = div; 1144 div >>= 1; 1145 } 1146 1147 if (real_div) 1148 host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div; 1149 1150 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; 1151 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) 1152 << SDHCI_DIVIDER_HI_SHIFT; 1153 clk |= SDHCI_CLOCK_INT_EN; 1154 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1155 1156 /* Wait max 20 ms */ 1157 timeout = 20; 1158 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) 1159 & SDHCI_CLOCK_INT_STABLE)) { 1160 if (timeout == 0) { 1161 pr_err("%s: Internal clock never " 1162 "stabilised.\n", mmc_hostname(host->mmc)); 1163 sdhci_dumpregs(host); 1164 return; 1165 } 1166 timeout--; 1167 mdelay(1); 1168 } 1169 1170 clk |= SDHCI_CLOCK_CARD_EN; 1171 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1172 1173 out: 1174 host->clock = clock; 1175 } 1176 1177 static int sdhci_set_power(struct sdhci_host *host, unsigned short power) 1178 { 1179 u8 pwr = 0; 1180 1181 if (power != (unsigned short)-1) { 1182 switch (1 << power) { 1183 case MMC_VDD_165_195: 1184 pwr = SDHCI_POWER_180; 1185 break; 1186 case MMC_VDD_29_30: 1187 case MMC_VDD_30_31: 1188 pwr = SDHCI_POWER_300; 1189 break; 1190 case MMC_VDD_32_33: 1191 case MMC_VDD_33_34: 1192 pwr = SDHCI_POWER_330; 1193 break; 1194 default: 1195 BUG(); 1196 } 1197 } 1198 1199 if (host->pwr == pwr) 1200 return -1; 1201 1202 host->pwr = pwr; 1203 1204 if (pwr == 0) { 1205 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1206 return 0; 1207 } 1208 1209 /* 1210 * Spec says that we should clear the power reg before setting 1211 * a new value. Some controllers don't seem to like this though. 1212 */ 1213 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 1214 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1215 1216 /* 1217 * At least the Marvell CaFe chip gets confused if we set the voltage 1218 * and set turn on power at the same time, so set the voltage first. 1219 */ 1220 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) 1221 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1222 1223 pwr |= SDHCI_POWER_ON; 1224 1225 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1226 1227 /* 1228 * Some controllers need an extra 10ms delay of 10ms before they 1229 * can apply clock after applying power 1230 */ 1231 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 1232 mdelay(10); 1233 1234 return power; 1235 } 1236 1237 /*****************************************************************************\ 1238 * * 1239 * MMC callbacks * 1240 * * 1241 \*****************************************************************************/ 1242 1243 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1244 { 1245 struct sdhci_host *host; 1246 bool present; 1247 unsigned long flags; 1248 1249 host = mmc_priv(mmc); 1250 1251 sdhci_runtime_pm_get(host); 1252 1253 spin_lock_irqsave(&host->lock, flags); 1254 1255 WARN_ON(host->mrq != NULL); 1256 1257 #ifndef SDHCI_USE_LEDS_CLASS 1258 sdhci_activate_led(host); 1259 #endif 1260 1261 /* 1262 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED 1263 * requests if Auto-CMD12 is enabled. 1264 */ 1265 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { 1266 if (mrq->stop) { 1267 mrq->data->stop = NULL; 1268 mrq->stop = NULL; 1269 } 1270 } 1271 1272 host->mrq = mrq; 1273 1274 /* If polling, assume that the card is always present. */ 1275 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 1276 present = true; 1277 else 1278 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 1279 SDHCI_CARD_PRESENT; 1280 1281 if (!present || host->flags & SDHCI_DEVICE_DEAD) { 1282 host->mrq->cmd->error = -ENOMEDIUM; 1283 tasklet_schedule(&host->finish_tasklet); 1284 } else { 1285 u32 present_state; 1286 1287 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 1288 /* 1289 * Check if the re-tuning timer has already expired and there 1290 * is no on-going data transfer. If so, we need to execute 1291 * tuning procedure before sending command. 1292 */ 1293 if ((host->flags & SDHCI_NEEDS_RETUNING) && 1294 !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) { 1295 spin_unlock_irqrestore(&host->lock, flags); 1296 sdhci_execute_tuning(mmc, mrq->cmd->opcode); 1297 spin_lock_irqsave(&host->lock, flags); 1298 1299 /* Restore original mmc_request structure */ 1300 host->mrq = mrq; 1301 } 1302 1303 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) 1304 sdhci_send_command(host, mrq->sbc); 1305 else 1306 sdhci_send_command(host, mrq->cmd); 1307 } 1308 1309 mmiowb(); 1310 spin_unlock_irqrestore(&host->lock, flags); 1311 } 1312 1313 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) 1314 { 1315 unsigned long flags; 1316 int vdd_bit = -1; 1317 u8 ctrl; 1318 1319 spin_lock_irqsave(&host->lock, flags); 1320 1321 if (host->flags & SDHCI_DEVICE_DEAD) { 1322 spin_unlock_irqrestore(&host->lock, flags); 1323 if (host->vmmc && ios->power_mode == MMC_POWER_OFF) 1324 mmc_regulator_set_ocr(host->mmc, host->vmmc, 0); 1325 return; 1326 } 1327 1328 /* 1329 * Reset the chip on each power off. 1330 * Should clear out any weird states. 1331 */ 1332 if (ios->power_mode == MMC_POWER_OFF) { 1333 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 1334 sdhci_reinit(host); 1335 } 1336 1337 sdhci_set_clock(host, ios->clock); 1338 1339 if (ios->power_mode == MMC_POWER_OFF) 1340 vdd_bit = sdhci_set_power(host, -1); 1341 else 1342 vdd_bit = sdhci_set_power(host, ios->vdd); 1343 1344 if (host->vmmc && vdd_bit != -1) { 1345 spin_unlock_irqrestore(&host->lock, flags); 1346 mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit); 1347 spin_lock_irqsave(&host->lock, flags); 1348 } 1349 1350 if (host->ops->platform_send_init_74_clocks) 1351 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 1352 1353 /* 1354 * If your platform has 8-bit width support but is not a v3 controller, 1355 * or if it requires special setup code, you should implement that in 1356 * platform_8bit_width(). 1357 */ 1358 if (host->ops->platform_8bit_width) 1359 host->ops->platform_8bit_width(host, ios->bus_width); 1360 else { 1361 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 1362 if (ios->bus_width == MMC_BUS_WIDTH_8) { 1363 ctrl &= ~SDHCI_CTRL_4BITBUS; 1364 if (host->version >= SDHCI_SPEC_300) 1365 ctrl |= SDHCI_CTRL_8BITBUS; 1366 } else { 1367 if (host->version >= SDHCI_SPEC_300) 1368 ctrl &= ~SDHCI_CTRL_8BITBUS; 1369 if (ios->bus_width == MMC_BUS_WIDTH_4) 1370 ctrl |= SDHCI_CTRL_4BITBUS; 1371 else 1372 ctrl &= ~SDHCI_CTRL_4BITBUS; 1373 } 1374 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1375 } 1376 1377 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 1378 1379 if ((ios->timing == MMC_TIMING_SD_HS || 1380 ios->timing == MMC_TIMING_MMC_HS) 1381 && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) 1382 ctrl |= SDHCI_CTRL_HISPD; 1383 else 1384 ctrl &= ~SDHCI_CTRL_HISPD; 1385 1386 if (host->version >= SDHCI_SPEC_300) { 1387 u16 clk, ctrl_2; 1388 unsigned int clock; 1389 1390 /* In case of UHS-I modes, set High Speed Enable */ 1391 if ((ios->timing == MMC_TIMING_MMC_HS200) || 1392 (ios->timing == MMC_TIMING_UHS_SDR50) || 1393 (ios->timing == MMC_TIMING_UHS_SDR104) || 1394 (ios->timing == MMC_TIMING_UHS_DDR50) || 1395 (ios->timing == MMC_TIMING_UHS_SDR25)) 1396 ctrl |= SDHCI_CTRL_HISPD; 1397 1398 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1399 if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) { 1400 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1401 /* 1402 * We only need to set Driver Strength if the 1403 * preset value enable is not set. 1404 */ 1405 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; 1406 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) 1407 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; 1408 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) 1409 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; 1410 1411 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 1412 } else { 1413 /* 1414 * According to SDHC Spec v3.00, if the Preset Value 1415 * Enable in the Host Control 2 register is set, we 1416 * need to reset SD Clock Enable before changing High 1417 * Speed Enable to avoid generating clock gliches. 1418 */ 1419 1420 /* Reset SD Clock Enable */ 1421 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1422 clk &= ~SDHCI_CLOCK_CARD_EN; 1423 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1424 1425 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1426 1427 /* Re-enable SD Clock */ 1428 clock = host->clock; 1429 host->clock = 0; 1430 sdhci_set_clock(host, clock); 1431 } 1432 1433 1434 /* Reset SD Clock Enable */ 1435 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1436 clk &= ~SDHCI_CLOCK_CARD_EN; 1437 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1438 1439 if (host->ops->set_uhs_signaling) 1440 host->ops->set_uhs_signaling(host, ios->timing); 1441 else { 1442 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1443 /* Select Bus Speed Mode for host */ 1444 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 1445 if (ios->timing == MMC_TIMING_MMC_HS200) 1446 ctrl_2 |= SDHCI_CTRL_HS_SDR200; 1447 else if (ios->timing == MMC_TIMING_UHS_SDR12) 1448 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 1449 else if (ios->timing == MMC_TIMING_UHS_SDR25) 1450 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 1451 else if (ios->timing == MMC_TIMING_UHS_SDR50) 1452 ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 1453 else if (ios->timing == MMC_TIMING_UHS_SDR104) 1454 ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 1455 else if (ios->timing == MMC_TIMING_UHS_DDR50) 1456 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 1457 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 1458 } 1459 1460 /* Re-enable SD Clock */ 1461 clock = host->clock; 1462 host->clock = 0; 1463 sdhci_set_clock(host, clock); 1464 } else 1465 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1466 1467 /* 1468 * Some (ENE) controllers go apeshit on some ios operation, 1469 * signalling timeout and CRC errors even on CMD0. Resetting 1470 * it on each ios seems to solve the problem. 1471 */ 1472 if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) 1473 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 1474 1475 mmiowb(); 1476 spin_unlock_irqrestore(&host->lock, flags); 1477 } 1478 1479 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1480 { 1481 struct sdhci_host *host = mmc_priv(mmc); 1482 1483 sdhci_runtime_pm_get(host); 1484 sdhci_do_set_ios(host, ios); 1485 sdhci_runtime_pm_put(host); 1486 } 1487 1488 static int sdhci_check_ro(struct sdhci_host *host) 1489 { 1490 unsigned long flags; 1491 int is_readonly; 1492 1493 spin_lock_irqsave(&host->lock, flags); 1494 1495 if (host->flags & SDHCI_DEVICE_DEAD) 1496 is_readonly = 0; 1497 else if (host->ops->get_ro) 1498 is_readonly = host->ops->get_ro(host); 1499 else 1500 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) 1501 & SDHCI_WRITE_PROTECT); 1502 1503 spin_unlock_irqrestore(&host->lock, flags); 1504 1505 /* This quirk needs to be replaced by a callback-function later */ 1506 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ? 1507 !is_readonly : is_readonly; 1508 } 1509 1510 #define SAMPLE_COUNT 5 1511 1512 static int sdhci_do_get_ro(struct sdhci_host *host) 1513 { 1514 int i, ro_count; 1515 1516 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) 1517 return sdhci_check_ro(host); 1518 1519 ro_count = 0; 1520 for (i = 0; i < SAMPLE_COUNT; i++) { 1521 if (sdhci_check_ro(host)) { 1522 if (++ro_count > SAMPLE_COUNT / 2) 1523 return 1; 1524 } 1525 msleep(30); 1526 } 1527 return 0; 1528 } 1529 1530 static void sdhci_hw_reset(struct mmc_host *mmc) 1531 { 1532 struct sdhci_host *host = mmc_priv(mmc); 1533 1534 if (host->ops && host->ops->hw_reset) 1535 host->ops->hw_reset(host); 1536 } 1537 1538 static int sdhci_get_ro(struct mmc_host *mmc) 1539 { 1540 struct sdhci_host *host = mmc_priv(mmc); 1541 int ret; 1542 1543 sdhci_runtime_pm_get(host); 1544 ret = sdhci_do_get_ro(host); 1545 sdhci_runtime_pm_put(host); 1546 return ret; 1547 } 1548 1549 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 1550 { 1551 if (host->flags & SDHCI_DEVICE_DEAD) 1552 goto out; 1553 1554 if (enable) 1555 host->flags |= SDHCI_SDIO_IRQ_ENABLED; 1556 else 1557 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED; 1558 1559 /* SDIO IRQ will be enabled as appropriate in runtime resume */ 1560 if (host->runtime_suspended) 1561 goto out; 1562 1563 if (enable) 1564 sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT); 1565 else 1566 sdhci_mask_irqs(host, SDHCI_INT_CARD_INT); 1567 out: 1568 mmiowb(); 1569 } 1570 1571 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1572 { 1573 struct sdhci_host *host = mmc_priv(mmc); 1574 unsigned long flags; 1575 1576 spin_lock_irqsave(&host->lock, flags); 1577 sdhci_enable_sdio_irq_nolock(host, enable); 1578 spin_unlock_irqrestore(&host->lock, flags); 1579 } 1580 1581 static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, 1582 struct mmc_ios *ios) 1583 { 1584 u8 pwr; 1585 u16 clk, ctrl; 1586 u32 present_state; 1587 1588 /* 1589 * Signal Voltage Switching is only applicable for Host Controllers 1590 * v3.00 and above. 1591 */ 1592 if (host->version < SDHCI_SPEC_300) 1593 return 0; 1594 1595 /* 1596 * We first check whether the request is to set signalling voltage 1597 * to 3.3V. If so, we change the voltage to 3.3V and return quickly. 1598 */ 1599 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1600 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { 1601 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 1602 ctrl &= ~SDHCI_CTRL_VDD_180; 1603 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1604 1605 /* Wait for 5ms */ 1606 usleep_range(5000, 5500); 1607 1608 /* 3.3V regulator output should be stable within 5 ms */ 1609 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1610 if (!(ctrl & SDHCI_CTRL_VDD_180)) 1611 return 0; 1612 else { 1613 pr_info(DRIVER_NAME ": Switching to 3.3V " 1614 "signalling voltage failed\n"); 1615 return -EIO; 1616 } 1617 } else if (!(ctrl & SDHCI_CTRL_VDD_180) && 1618 (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) { 1619 /* Stop SDCLK */ 1620 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1621 clk &= ~SDHCI_CLOCK_CARD_EN; 1622 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1623 1624 /* Check whether DAT[3:0] is 0000 */ 1625 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 1626 if (!((present_state & SDHCI_DATA_LVL_MASK) >> 1627 SDHCI_DATA_LVL_SHIFT)) { 1628 /* 1629 * Enable 1.8V Signal Enable in the Host Control2 1630 * register 1631 */ 1632 ctrl |= SDHCI_CTRL_VDD_180; 1633 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1634 1635 /* Wait for 5ms */ 1636 usleep_range(5000, 5500); 1637 1638 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1639 if (ctrl & SDHCI_CTRL_VDD_180) { 1640 /* Provide SDCLK again and wait for 1ms*/ 1641 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1642 clk |= SDHCI_CLOCK_CARD_EN; 1643 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1644 usleep_range(1000, 1500); 1645 1646 /* 1647 * If DAT[3:0] level is 1111b, then the card 1648 * was successfully switched to 1.8V signaling. 1649 */ 1650 present_state = sdhci_readl(host, 1651 SDHCI_PRESENT_STATE); 1652 if ((present_state & SDHCI_DATA_LVL_MASK) == 1653 SDHCI_DATA_LVL_MASK) 1654 return 0; 1655 } 1656 } 1657 1658 /* 1659 * If we are here, that means the switch to 1.8V signaling 1660 * failed. We power cycle the card, and retry initialization 1661 * sequence by setting S18R to 0. 1662 */ 1663 pwr = sdhci_readb(host, SDHCI_POWER_CONTROL); 1664 pwr &= ~SDHCI_POWER_ON; 1665 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1666 1667 /* Wait for 1ms as per the spec */ 1668 usleep_range(1000, 1500); 1669 pwr |= SDHCI_POWER_ON; 1670 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1671 1672 pr_info(DRIVER_NAME ": Switching to 1.8V signalling " 1673 "voltage failed, retrying with S18R set to 0\n"); 1674 return -EAGAIN; 1675 } else 1676 /* No signal voltage switch required */ 1677 return 0; 1678 } 1679 1680 static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 1681 struct mmc_ios *ios) 1682 { 1683 struct sdhci_host *host = mmc_priv(mmc); 1684 int err; 1685 1686 if (host->version < SDHCI_SPEC_300) 1687 return 0; 1688 sdhci_runtime_pm_get(host); 1689 err = sdhci_do_start_signal_voltage_switch(host, ios); 1690 sdhci_runtime_pm_put(host); 1691 return err; 1692 } 1693 1694 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 1695 { 1696 struct sdhci_host *host; 1697 u16 ctrl; 1698 u32 ier; 1699 int tuning_loop_counter = MAX_TUNING_LOOP; 1700 unsigned long timeout; 1701 int err = 0; 1702 bool requires_tuning_nonuhs = false; 1703 1704 host = mmc_priv(mmc); 1705 1706 sdhci_runtime_pm_get(host); 1707 disable_irq(host->irq); 1708 spin_lock(&host->lock); 1709 1710 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1711 1712 /* 1713 * The Host Controller needs tuning only in case of SDR104 mode 1714 * and for SDR50 mode when Use Tuning for SDR50 is set in the 1715 * Capabilities register. 1716 * If the Host Controller supports the HS200 mode then the 1717 * tuning function has to be executed. 1718 */ 1719 if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) && 1720 (host->flags & SDHCI_SDR50_NEEDS_TUNING || 1721 host->flags & SDHCI_HS200_NEEDS_TUNING)) 1722 requires_tuning_nonuhs = true; 1723 1724 if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) || 1725 requires_tuning_nonuhs) 1726 ctrl |= SDHCI_CTRL_EXEC_TUNING; 1727 else { 1728 spin_unlock(&host->lock); 1729 enable_irq(host->irq); 1730 sdhci_runtime_pm_put(host); 1731 return 0; 1732 } 1733 1734 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1735 1736 /* 1737 * As per the Host Controller spec v3.00, tuning command 1738 * generates Buffer Read Ready interrupt, so enable that. 1739 * 1740 * Note: The spec clearly says that when tuning sequence 1741 * is being performed, the controller does not generate 1742 * interrupts other than Buffer Read Ready interrupt. But 1743 * to make sure we don't hit a controller bug, we _only_ 1744 * enable Buffer Read Ready interrupt here. 1745 */ 1746 ier = sdhci_readl(host, SDHCI_INT_ENABLE); 1747 sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL); 1748 1749 /* 1750 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number 1751 * of loops reaches 40 times or a timeout of 150ms occurs. 1752 */ 1753 timeout = 150; 1754 do { 1755 struct mmc_command cmd = {0}; 1756 struct mmc_request mrq = {NULL}; 1757 1758 if (!tuning_loop_counter && !timeout) 1759 break; 1760 1761 cmd.opcode = opcode; 1762 cmd.arg = 0; 1763 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 1764 cmd.retries = 0; 1765 cmd.data = NULL; 1766 cmd.error = 0; 1767 1768 mrq.cmd = &cmd; 1769 host->mrq = &mrq; 1770 1771 /* 1772 * In response to CMD19, the card sends 64 bytes of tuning 1773 * block to the Host Controller. So we set the block size 1774 * to 64 here. 1775 */ 1776 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) { 1777 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) 1778 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), 1779 SDHCI_BLOCK_SIZE); 1780 else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) 1781 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), 1782 SDHCI_BLOCK_SIZE); 1783 } else { 1784 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), 1785 SDHCI_BLOCK_SIZE); 1786 } 1787 1788 /* 1789 * The tuning block is sent by the card to the host controller. 1790 * So we set the TRNS_READ bit in the Transfer Mode register. 1791 * This also takes care of setting DMA Enable and Multi Block 1792 * Select in the same register to 0. 1793 */ 1794 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 1795 1796 sdhci_send_command(host, &cmd); 1797 1798 host->cmd = NULL; 1799 host->mrq = NULL; 1800 1801 spin_unlock(&host->lock); 1802 enable_irq(host->irq); 1803 1804 /* Wait for Buffer Read Ready interrupt */ 1805 wait_event_interruptible_timeout(host->buf_ready_int, 1806 (host->tuning_done == 1), 1807 msecs_to_jiffies(50)); 1808 disable_irq(host->irq); 1809 spin_lock(&host->lock); 1810 1811 if (!host->tuning_done) { 1812 pr_info(DRIVER_NAME ": Timeout waiting for " 1813 "Buffer Read Ready interrupt during tuning " 1814 "procedure, falling back to fixed sampling " 1815 "clock\n"); 1816 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1817 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 1818 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 1819 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1820 1821 err = -EIO; 1822 goto out; 1823 } 1824 1825 host->tuning_done = 0; 1826 1827 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1828 tuning_loop_counter--; 1829 timeout--; 1830 mdelay(1); 1831 } while (ctrl & SDHCI_CTRL_EXEC_TUNING); 1832 1833 /* 1834 * The Host Driver has exhausted the maximum number of loops allowed, 1835 * so use fixed sampling frequency. 1836 */ 1837 if (!tuning_loop_counter || !timeout) { 1838 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 1839 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1840 } else { 1841 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { 1842 pr_info(DRIVER_NAME ": Tuning procedure" 1843 " failed, falling back to fixed sampling" 1844 " clock\n"); 1845 err = -EIO; 1846 } 1847 } 1848 1849 out: 1850 /* 1851 * If this is the very first time we are here, we start the retuning 1852 * timer. Since only during the first time, SDHCI_NEEDS_RETUNING 1853 * flag won't be set, we check this condition before actually starting 1854 * the timer. 1855 */ 1856 if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count && 1857 (host->tuning_mode == SDHCI_TUNING_MODE_1)) { 1858 mod_timer(&host->tuning_timer, jiffies + 1859 host->tuning_count * HZ); 1860 /* Tuning mode 1 limits the maximum data length to 4MB */ 1861 mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size; 1862 } else { 1863 host->flags &= ~SDHCI_NEEDS_RETUNING; 1864 /* Reload the new initial value for timer */ 1865 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 1866 mod_timer(&host->tuning_timer, jiffies + 1867 host->tuning_count * HZ); 1868 } 1869 1870 /* 1871 * In case tuning fails, host controllers which support re-tuning can 1872 * try tuning again at a later time, when the re-tuning timer expires. 1873 * So for these controllers, we return 0. Since there might be other 1874 * controllers who do not have this capability, we return error for 1875 * them. 1876 */ 1877 if (err && host->tuning_count && 1878 host->tuning_mode == SDHCI_TUNING_MODE_1) 1879 err = 0; 1880 1881 sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier); 1882 spin_unlock(&host->lock); 1883 enable_irq(host->irq); 1884 sdhci_runtime_pm_put(host); 1885 1886 return err; 1887 } 1888 1889 static void sdhci_do_enable_preset_value(struct sdhci_host *host, bool enable) 1890 { 1891 u16 ctrl; 1892 unsigned long flags; 1893 1894 /* Host Controller v3.00 defines preset value registers */ 1895 if (host->version < SDHCI_SPEC_300) 1896 return; 1897 1898 spin_lock_irqsave(&host->lock, flags); 1899 1900 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1901 1902 /* 1903 * We only enable or disable Preset Value if they are not already 1904 * enabled or disabled respectively. Otherwise, we bail out. 1905 */ 1906 if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { 1907 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 1908 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1909 host->flags |= SDHCI_PV_ENABLED; 1910 } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { 1911 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 1912 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 1913 host->flags &= ~SDHCI_PV_ENABLED; 1914 } 1915 1916 spin_unlock_irqrestore(&host->lock, flags); 1917 } 1918 1919 static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable) 1920 { 1921 struct sdhci_host *host = mmc_priv(mmc); 1922 1923 sdhci_runtime_pm_get(host); 1924 sdhci_do_enable_preset_value(host, enable); 1925 sdhci_runtime_pm_put(host); 1926 } 1927 1928 static const struct mmc_host_ops sdhci_ops = { 1929 .request = sdhci_request, 1930 .set_ios = sdhci_set_ios, 1931 .get_ro = sdhci_get_ro, 1932 .hw_reset = sdhci_hw_reset, 1933 .enable_sdio_irq = sdhci_enable_sdio_irq, 1934 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 1935 .execute_tuning = sdhci_execute_tuning, 1936 .enable_preset_value = sdhci_enable_preset_value, 1937 }; 1938 1939 /*****************************************************************************\ 1940 * * 1941 * Tasklets * 1942 * * 1943 \*****************************************************************************/ 1944 1945 static void sdhci_tasklet_card(unsigned long param) 1946 { 1947 struct sdhci_host *host; 1948 unsigned long flags; 1949 1950 host = (struct sdhci_host*)param; 1951 1952 spin_lock_irqsave(&host->lock, flags); 1953 1954 /* Check host->mrq first in case we are runtime suspended */ 1955 if (host->mrq && 1956 !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { 1957 pr_err("%s: Card removed during transfer!\n", 1958 mmc_hostname(host->mmc)); 1959 pr_err("%s: Resetting controller.\n", 1960 mmc_hostname(host->mmc)); 1961 1962 sdhci_reset(host, SDHCI_RESET_CMD); 1963 sdhci_reset(host, SDHCI_RESET_DATA); 1964 1965 host->mrq->cmd->error = -ENOMEDIUM; 1966 tasklet_schedule(&host->finish_tasklet); 1967 } 1968 1969 spin_unlock_irqrestore(&host->lock, flags); 1970 1971 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 1972 } 1973 1974 static void sdhci_tasklet_finish(unsigned long param) 1975 { 1976 struct sdhci_host *host; 1977 unsigned long flags; 1978 struct mmc_request *mrq; 1979 1980 host = (struct sdhci_host*)param; 1981 1982 spin_lock_irqsave(&host->lock, flags); 1983 1984 /* 1985 * If this tasklet gets rescheduled while running, it will 1986 * be run again afterwards but without any active request. 1987 */ 1988 if (!host->mrq) { 1989 spin_unlock_irqrestore(&host->lock, flags); 1990 return; 1991 } 1992 1993 del_timer(&host->timer); 1994 1995 mrq = host->mrq; 1996 1997 /* 1998 * The controller needs a reset of internal state machines 1999 * upon error conditions. 2000 */ 2001 if (!(host->flags & SDHCI_DEVICE_DEAD) && 2002 ((mrq->cmd && mrq->cmd->error) || 2003 (mrq->data && (mrq->data->error || 2004 (mrq->data->stop && mrq->data->stop->error))) || 2005 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { 2006 2007 /* Some controllers need this kick or reset won't work here */ 2008 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { 2009 unsigned int clock; 2010 2011 /* This is to force an update */ 2012 clock = host->clock; 2013 host->clock = 0; 2014 sdhci_set_clock(host, clock); 2015 } 2016 2017 /* Spec says we should do both at the same time, but Ricoh 2018 controllers do not like that. */ 2019 sdhci_reset(host, SDHCI_RESET_CMD); 2020 sdhci_reset(host, SDHCI_RESET_DATA); 2021 } 2022 2023 host->mrq = NULL; 2024 host->cmd = NULL; 2025 host->data = NULL; 2026 2027 #ifndef SDHCI_USE_LEDS_CLASS 2028 sdhci_deactivate_led(host); 2029 #endif 2030 2031 mmiowb(); 2032 spin_unlock_irqrestore(&host->lock, flags); 2033 2034 mmc_request_done(host->mmc, mrq); 2035 sdhci_runtime_pm_put(host); 2036 } 2037 2038 static void sdhci_timeout_timer(unsigned long data) 2039 { 2040 struct sdhci_host *host; 2041 unsigned long flags; 2042 2043 host = (struct sdhci_host*)data; 2044 2045 spin_lock_irqsave(&host->lock, flags); 2046 2047 if (host->mrq) { 2048 pr_err("%s: Timeout waiting for hardware " 2049 "interrupt.\n", mmc_hostname(host->mmc)); 2050 sdhci_dumpregs(host); 2051 2052 if (host->data) { 2053 host->data->error = -ETIMEDOUT; 2054 sdhci_finish_data(host); 2055 } else { 2056 if (host->cmd) 2057 host->cmd->error = -ETIMEDOUT; 2058 else 2059 host->mrq->cmd->error = -ETIMEDOUT; 2060 2061 tasklet_schedule(&host->finish_tasklet); 2062 } 2063 } 2064 2065 mmiowb(); 2066 spin_unlock_irqrestore(&host->lock, flags); 2067 } 2068 2069 static void sdhci_tuning_timer(unsigned long data) 2070 { 2071 struct sdhci_host *host; 2072 unsigned long flags; 2073 2074 host = (struct sdhci_host *)data; 2075 2076 spin_lock_irqsave(&host->lock, flags); 2077 2078 host->flags |= SDHCI_NEEDS_RETUNING; 2079 2080 spin_unlock_irqrestore(&host->lock, flags); 2081 } 2082 2083 /*****************************************************************************\ 2084 * * 2085 * Interrupt handling * 2086 * * 2087 \*****************************************************************************/ 2088 2089 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask) 2090 { 2091 BUG_ON(intmask == 0); 2092 2093 if (!host->cmd) { 2094 pr_err("%s: Got command interrupt 0x%08x even " 2095 "though no command operation was in progress.\n", 2096 mmc_hostname(host->mmc), (unsigned)intmask); 2097 sdhci_dumpregs(host); 2098 return; 2099 } 2100 2101 if (intmask & SDHCI_INT_TIMEOUT) 2102 host->cmd->error = -ETIMEDOUT; 2103 else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT | 2104 SDHCI_INT_INDEX)) 2105 host->cmd->error = -EILSEQ; 2106 2107 if (host->cmd->error) { 2108 tasklet_schedule(&host->finish_tasklet); 2109 return; 2110 } 2111 2112 /* 2113 * The host can send and interrupt when the busy state has 2114 * ended, allowing us to wait without wasting CPU cycles. 2115 * Unfortunately this is overloaded on the "data complete" 2116 * interrupt, so we need to take some care when handling 2117 * it. 2118 * 2119 * Note: The 1.0 specification is a bit ambiguous about this 2120 * feature so there might be some problems with older 2121 * controllers. 2122 */ 2123 if (host->cmd->flags & MMC_RSP_BUSY) { 2124 if (host->cmd->data) 2125 DBG("Cannot wait for busy signal when also " 2126 "doing a data transfer"); 2127 else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)) 2128 return; 2129 2130 /* The controller does not support the end-of-busy IRQ, 2131 * fall through and take the SDHCI_INT_RESPONSE */ 2132 } 2133 2134 if (intmask & SDHCI_INT_RESPONSE) 2135 sdhci_finish_command(host); 2136 } 2137 2138 #ifdef CONFIG_MMC_DEBUG 2139 static void sdhci_show_adma_error(struct sdhci_host *host) 2140 { 2141 const char *name = mmc_hostname(host->mmc); 2142 u8 *desc = host->adma_desc; 2143 __le32 *dma; 2144 __le16 *len; 2145 u8 attr; 2146 2147 sdhci_dumpregs(host); 2148 2149 while (true) { 2150 dma = (__le32 *)(desc + 4); 2151 len = (__le16 *)(desc + 2); 2152 attr = *desc; 2153 2154 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 2155 name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr); 2156 2157 desc += 8; 2158 2159 if (attr & 2) 2160 break; 2161 } 2162 } 2163 #else 2164 static void sdhci_show_adma_error(struct sdhci_host *host) { } 2165 #endif 2166 2167 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 2168 { 2169 u32 command; 2170 BUG_ON(intmask == 0); 2171 2172 /* CMD19 generates _only_ Buffer Read Ready interrupt */ 2173 if (intmask & SDHCI_INT_DATA_AVAIL) { 2174 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); 2175 if (command == MMC_SEND_TUNING_BLOCK || 2176 command == MMC_SEND_TUNING_BLOCK_HS200) { 2177 host->tuning_done = 1; 2178 wake_up(&host->buf_ready_int); 2179 return; 2180 } 2181 } 2182 2183 if (!host->data) { 2184 /* 2185 * The "data complete" interrupt is also used to 2186 * indicate that a busy state has ended. See comment 2187 * above in sdhci_cmd_irq(). 2188 */ 2189 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) { 2190 if (intmask & SDHCI_INT_DATA_END) { 2191 sdhci_finish_command(host); 2192 return; 2193 } 2194 } 2195 2196 pr_err("%s: Got data interrupt 0x%08x even " 2197 "though no data operation was in progress.\n", 2198 mmc_hostname(host->mmc), (unsigned)intmask); 2199 sdhci_dumpregs(host); 2200 2201 return; 2202 } 2203 2204 if (intmask & SDHCI_INT_DATA_TIMEOUT) 2205 host->data->error = -ETIMEDOUT; 2206 else if (intmask & SDHCI_INT_DATA_END_BIT) 2207 host->data->error = -EILSEQ; 2208 else if ((intmask & SDHCI_INT_DATA_CRC) && 2209 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 2210 != MMC_BUS_TEST_R) 2211 host->data->error = -EILSEQ; 2212 else if (intmask & SDHCI_INT_ADMA_ERROR) { 2213 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); 2214 sdhci_show_adma_error(host); 2215 host->data->error = -EIO; 2216 } 2217 2218 if (host->data->error) 2219 sdhci_finish_data(host); 2220 else { 2221 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 2222 sdhci_transfer_pio(host); 2223 2224 /* 2225 * We currently don't do anything fancy with DMA 2226 * boundaries, but as we can't disable the feature 2227 * we need to at least restart the transfer. 2228 * 2229 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 2230 * should return a valid address to continue from, but as 2231 * some controllers are faulty, don't trust them. 2232 */ 2233 if (intmask & SDHCI_INT_DMA_END) { 2234 u32 dmastart, dmanow; 2235 dmastart = sg_dma_address(host->data->sg); 2236 dmanow = dmastart + host->data->bytes_xfered; 2237 /* 2238 * Force update to the next DMA block boundary. 2239 */ 2240 dmanow = (dmanow & 2241 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 2242 SDHCI_DEFAULT_BOUNDARY_SIZE; 2243 host->data->bytes_xfered = dmanow - dmastart; 2244 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes," 2245 " next 0x%08x\n", 2246 mmc_hostname(host->mmc), dmastart, 2247 host->data->bytes_xfered, dmanow); 2248 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); 2249 } 2250 2251 if (intmask & SDHCI_INT_DATA_END) { 2252 if (host->cmd) { 2253 /* 2254 * Data managed to finish before the 2255 * command completed. Make sure we do 2256 * things in the proper order. 2257 */ 2258 host->data_early = 1; 2259 } else { 2260 sdhci_finish_data(host); 2261 } 2262 } 2263 } 2264 } 2265 2266 static irqreturn_t sdhci_irq(int irq, void *dev_id) 2267 { 2268 irqreturn_t result; 2269 struct sdhci_host *host = dev_id; 2270 u32 intmask, unexpected = 0; 2271 int cardint = 0, max_loops = 16; 2272 2273 spin_lock(&host->lock); 2274 2275 if (host->runtime_suspended) { 2276 spin_unlock(&host->lock); 2277 pr_warning("%s: got irq while runtime suspended\n", 2278 mmc_hostname(host->mmc)); 2279 return IRQ_HANDLED; 2280 } 2281 2282 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 2283 2284 if (!intmask || intmask == 0xffffffff) { 2285 result = IRQ_NONE; 2286 goto out; 2287 } 2288 2289 again: 2290 DBG("*** %s got interrupt: 0x%08x\n", 2291 mmc_hostname(host->mmc), intmask); 2292 2293 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 2294 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 2295 SDHCI_CARD_PRESENT; 2296 2297 /* 2298 * There is a observation on i.mx esdhc. INSERT bit will be 2299 * immediately set again when it gets cleared, if a card is 2300 * inserted. We have to mask the irq to prevent interrupt 2301 * storm which will freeze the system. And the REMOVE gets 2302 * the same situation. 2303 * 2304 * More testing are needed here to ensure it works for other 2305 * platforms though. 2306 */ 2307 sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT : 2308 SDHCI_INT_CARD_REMOVE); 2309 sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE : 2310 SDHCI_INT_CARD_INSERT); 2311 2312 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 2313 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 2314 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); 2315 tasklet_schedule(&host->card_tasklet); 2316 } 2317 2318 if (intmask & SDHCI_INT_CMD_MASK) { 2319 sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, 2320 SDHCI_INT_STATUS); 2321 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); 2322 } 2323 2324 if (intmask & SDHCI_INT_DATA_MASK) { 2325 sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK, 2326 SDHCI_INT_STATUS); 2327 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 2328 } 2329 2330 intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK); 2331 2332 intmask &= ~SDHCI_INT_ERROR; 2333 2334 if (intmask & SDHCI_INT_BUS_POWER) { 2335 pr_err("%s: Card is consuming too much power!\n", 2336 mmc_hostname(host->mmc)); 2337 sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS); 2338 } 2339 2340 intmask &= ~SDHCI_INT_BUS_POWER; 2341 2342 if (intmask & SDHCI_INT_CARD_INT) 2343 cardint = 1; 2344 2345 intmask &= ~SDHCI_INT_CARD_INT; 2346 2347 if (intmask) { 2348 unexpected |= intmask; 2349 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 2350 } 2351 2352 result = IRQ_HANDLED; 2353 2354 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 2355 if (intmask && --max_loops) 2356 goto again; 2357 out: 2358 spin_unlock(&host->lock); 2359 2360 if (unexpected) { 2361 pr_err("%s: Unexpected interrupt 0x%08x.\n", 2362 mmc_hostname(host->mmc), unexpected); 2363 sdhci_dumpregs(host); 2364 } 2365 /* 2366 * We have to delay this as it calls back into the driver. 2367 */ 2368 if (cardint) 2369 mmc_signal_sdio_irq(host->mmc); 2370 2371 return result; 2372 } 2373 2374 /*****************************************************************************\ 2375 * * 2376 * Suspend/resume * 2377 * * 2378 \*****************************************************************************/ 2379 2380 #ifdef CONFIG_PM 2381 2382 int sdhci_suspend_host(struct sdhci_host *host) 2383 { 2384 int ret; 2385 bool has_tuning_timer; 2386 2387 if (host->ops->platform_suspend) 2388 host->ops->platform_suspend(host); 2389 2390 sdhci_disable_card_detection(host); 2391 2392 /* Disable tuning since we are suspending */ 2393 has_tuning_timer = host->version >= SDHCI_SPEC_300 && 2394 host->tuning_count && host->tuning_mode == SDHCI_TUNING_MODE_1; 2395 if (has_tuning_timer) { 2396 del_timer_sync(&host->tuning_timer); 2397 host->flags &= ~SDHCI_NEEDS_RETUNING; 2398 } 2399 2400 ret = mmc_suspend_host(host->mmc); 2401 if (ret) { 2402 if (has_tuning_timer) { 2403 host->flags |= SDHCI_NEEDS_RETUNING; 2404 mod_timer(&host->tuning_timer, jiffies + 2405 host->tuning_count * HZ); 2406 } 2407 2408 sdhci_enable_card_detection(host); 2409 2410 return ret; 2411 } 2412 2413 free_irq(host->irq, host); 2414 2415 return ret; 2416 } 2417 2418 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 2419 2420 int sdhci_resume_host(struct sdhci_host *host) 2421 { 2422 int ret; 2423 2424 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 2425 if (host->ops->enable_dma) 2426 host->ops->enable_dma(host); 2427 } 2428 2429 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, 2430 mmc_hostname(host->mmc), host); 2431 if (ret) 2432 return ret; 2433 2434 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && 2435 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 2436 /* Card keeps power but host controller does not */ 2437 sdhci_init(host, 0); 2438 host->pwr = 0; 2439 host->clock = 0; 2440 sdhci_do_set_ios(host, &host->mmc->ios); 2441 } else { 2442 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); 2443 mmiowb(); 2444 } 2445 2446 ret = mmc_resume_host(host->mmc); 2447 sdhci_enable_card_detection(host); 2448 2449 if (host->ops->platform_resume) 2450 host->ops->platform_resume(host); 2451 2452 /* Set the re-tuning expiration flag */ 2453 if ((host->version >= SDHCI_SPEC_300) && host->tuning_count && 2454 (host->tuning_mode == SDHCI_TUNING_MODE_1)) 2455 host->flags |= SDHCI_NEEDS_RETUNING; 2456 2457 return ret; 2458 } 2459 2460 EXPORT_SYMBOL_GPL(sdhci_resume_host); 2461 2462 void sdhci_enable_irq_wakeups(struct sdhci_host *host) 2463 { 2464 u8 val; 2465 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 2466 val |= SDHCI_WAKE_ON_INT; 2467 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 2468 } 2469 2470 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); 2471 2472 #endif /* CONFIG_PM */ 2473 2474 #ifdef CONFIG_PM_RUNTIME 2475 2476 static int sdhci_runtime_pm_get(struct sdhci_host *host) 2477 { 2478 return pm_runtime_get_sync(host->mmc->parent); 2479 } 2480 2481 static int sdhci_runtime_pm_put(struct sdhci_host *host) 2482 { 2483 pm_runtime_mark_last_busy(host->mmc->parent); 2484 return pm_runtime_put_autosuspend(host->mmc->parent); 2485 } 2486 2487 int sdhci_runtime_suspend_host(struct sdhci_host *host) 2488 { 2489 unsigned long flags; 2490 int ret = 0; 2491 2492 /* Disable tuning since we are suspending */ 2493 if (host->version >= SDHCI_SPEC_300 && 2494 host->tuning_mode == SDHCI_TUNING_MODE_1) { 2495 del_timer_sync(&host->tuning_timer); 2496 host->flags &= ~SDHCI_NEEDS_RETUNING; 2497 } 2498 2499 spin_lock_irqsave(&host->lock, flags); 2500 sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); 2501 spin_unlock_irqrestore(&host->lock, flags); 2502 2503 synchronize_irq(host->irq); 2504 2505 spin_lock_irqsave(&host->lock, flags); 2506 host->runtime_suspended = true; 2507 spin_unlock_irqrestore(&host->lock, flags); 2508 2509 return ret; 2510 } 2511 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 2512 2513 int sdhci_runtime_resume_host(struct sdhci_host *host) 2514 { 2515 unsigned long flags; 2516 int ret = 0, host_flags = host->flags; 2517 2518 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 2519 if (host->ops->enable_dma) 2520 host->ops->enable_dma(host); 2521 } 2522 2523 sdhci_init(host, 0); 2524 2525 /* Force clock and power re-program */ 2526 host->pwr = 0; 2527 host->clock = 0; 2528 sdhci_do_set_ios(host, &host->mmc->ios); 2529 2530 sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios); 2531 if (host_flags & SDHCI_PV_ENABLED) 2532 sdhci_do_enable_preset_value(host, true); 2533 2534 /* Set the re-tuning expiration flag */ 2535 if ((host->version >= SDHCI_SPEC_300) && host->tuning_count && 2536 (host->tuning_mode == SDHCI_TUNING_MODE_1)) 2537 host->flags |= SDHCI_NEEDS_RETUNING; 2538 2539 spin_lock_irqsave(&host->lock, flags); 2540 2541 host->runtime_suspended = false; 2542 2543 /* Enable SDIO IRQ */ 2544 if ((host->flags & SDHCI_SDIO_IRQ_ENABLED)) 2545 sdhci_enable_sdio_irq_nolock(host, true); 2546 2547 /* Enable Card Detection */ 2548 sdhci_enable_card_detection(host); 2549 2550 spin_unlock_irqrestore(&host->lock, flags); 2551 2552 return ret; 2553 } 2554 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 2555 2556 #endif 2557 2558 /*****************************************************************************\ 2559 * * 2560 * Device allocation/registration * 2561 * * 2562 \*****************************************************************************/ 2563 2564 struct sdhci_host *sdhci_alloc_host(struct device *dev, 2565 size_t priv_size) 2566 { 2567 struct mmc_host *mmc; 2568 struct sdhci_host *host; 2569 2570 WARN_ON(dev == NULL); 2571 2572 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 2573 if (!mmc) 2574 return ERR_PTR(-ENOMEM); 2575 2576 host = mmc_priv(mmc); 2577 host->mmc = mmc; 2578 2579 return host; 2580 } 2581 2582 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 2583 2584 int sdhci_add_host(struct sdhci_host *host) 2585 { 2586 struct mmc_host *mmc; 2587 u32 caps[2]; 2588 u32 max_current_caps; 2589 unsigned int ocr_avail; 2590 int ret; 2591 2592 WARN_ON(host == NULL); 2593 if (host == NULL) 2594 return -EINVAL; 2595 2596 mmc = host->mmc; 2597 2598 if (debug_quirks) 2599 host->quirks = debug_quirks; 2600 if (debug_quirks2) 2601 host->quirks2 = debug_quirks2; 2602 2603 sdhci_reset(host, SDHCI_RESET_ALL); 2604 2605 host->version = sdhci_readw(host, SDHCI_HOST_VERSION); 2606 host->version = (host->version & SDHCI_SPEC_VER_MASK) 2607 >> SDHCI_SPEC_VER_SHIFT; 2608 if (host->version > SDHCI_SPEC_300) { 2609 pr_err("%s: Unknown controller version (%d). " 2610 "You may experience problems.\n", mmc_hostname(mmc), 2611 host->version); 2612 } 2613 2614 caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : 2615 sdhci_readl(host, SDHCI_CAPABILITIES); 2616 2617 caps[1] = (host->version >= SDHCI_SPEC_300) ? 2618 sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0; 2619 2620 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 2621 host->flags |= SDHCI_USE_SDMA; 2622 else if (!(caps[0] & SDHCI_CAN_DO_SDMA)) 2623 DBG("Controller doesn't have SDMA capability\n"); 2624 else 2625 host->flags |= SDHCI_USE_SDMA; 2626 2627 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 2628 (host->flags & SDHCI_USE_SDMA)) { 2629 DBG("Disabling DMA as it is marked broken\n"); 2630 host->flags &= ~SDHCI_USE_SDMA; 2631 } 2632 2633 if ((host->version >= SDHCI_SPEC_200) && 2634 (caps[0] & SDHCI_CAN_DO_ADMA2)) 2635 host->flags |= SDHCI_USE_ADMA; 2636 2637 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 2638 (host->flags & SDHCI_USE_ADMA)) { 2639 DBG("Disabling ADMA as it is marked broken\n"); 2640 host->flags &= ~SDHCI_USE_ADMA; 2641 } 2642 2643 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 2644 if (host->ops->enable_dma) { 2645 if (host->ops->enable_dma(host)) { 2646 pr_warning("%s: No suitable DMA " 2647 "available. Falling back to PIO.\n", 2648 mmc_hostname(mmc)); 2649 host->flags &= 2650 ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 2651 } 2652 } 2653 } 2654 2655 if (host->flags & SDHCI_USE_ADMA) { 2656 /* 2657 * We need to allocate descriptors for all sg entries 2658 * (128) and potentially one alignment transfer for 2659 * each of those entries. 2660 */ 2661 host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL); 2662 host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); 2663 if (!host->adma_desc || !host->align_buffer) { 2664 kfree(host->adma_desc); 2665 kfree(host->align_buffer); 2666 pr_warning("%s: Unable to allocate ADMA " 2667 "buffers. Falling back to standard DMA.\n", 2668 mmc_hostname(mmc)); 2669 host->flags &= ~SDHCI_USE_ADMA; 2670 } 2671 } 2672 2673 /* 2674 * If we use DMA, then it's up to the caller to set the DMA 2675 * mask, but PIO does not need the hw shim so we set a new 2676 * mask here in that case. 2677 */ 2678 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { 2679 host->dma_mask = DMA_BIT_MASK(64); 2680 mmc_dev(host->mmc)->dma_mask = &host->dma_mask; 2681 } 2682 2683 if (host->version >= SDHCI_SPEC_300) 2684 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK) 2685 >> SDHCI_CLOCK_BASE_SHIFT; 2686 else 2687 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK) 2688 >> SDHCI_CLOCK_BASE_SHIFT; 2689 2690 host->max_clk *= 1000000; 2691 if (host->max_clk == 0 || host->quirks & 2692 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 2693 if (!host->ops->get_max_clock) { 2694 pr_err("%s: Hardware doesn't specify base clock " 2695 "frequency.\n", mmc_hostname(mmc)); 2696 return -ENODEV; 2697 } 2698 host->max_clk = host->ops->get_max_clock(host); 2699 } 2700 2701 /* 2702 * In case of Host Controller v3.00, find out whether clock 2703 * multiplier is supported. 2704 */ 2705 host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >> 2706 SDHCI_CLOCK_MUL_SHIFT; 2707 2708 /* 2709 * In case the value in Clock Multiplier is 0, then programmable 2710 * clock mode is not supported, otherwise the actual clock 2711 * multiplier is one more than the value of Clock Multiplier 2712 * in the Capabilities Register. 2713 */ 2714 if (host->clk_mul) 2715 host->clk_mul += 1; 2716 2717 /* 2718 * Set host parameters. 2719 */ 2720 mmc->ops = &sdhci_ops; 2721 mmc->f_max = host->max_clk; 2722 if (host->ops->get_min_clock) 2723 mmc->f_min = host->ops->get_min_clock(host); 2724 else if (host->version >= SDHCI_SPEC_300) { 2725 if (host->clk_mul) { 2726 mmc->f_min = (host->max_clk * host->clk_mul) / 1024; 2727 mmc->f_max = host->max_clk * host->clk_mul; 2728 } else 2729 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 2730 } else 2731 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 2732 2733 host->timeout_clk = 2734 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; 2735 if (host->timeout_clk == 0) { 2736 if (host->ops->get_timeout_clock) { 2737 host->timeout_clk = host->ops->get_timeout_clock(host); 2738 } else if (!(host->quirks & 2739 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 2740 pr_err("%s: Hardware doesn't specify timeout clock " 2741 "frequency.\n", mmc_hostname(mmc)); 2742 return -ENODEV; 2743 } 2744 } 2745 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT) 2746 host->timeout_clk *= 1000; 2747 2748 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) 2749 host->timeout_clk = mmc->f_max / 1000; 2750 2751 mmc->max_discard_to = (1 << 27) / host->timeout_clk; 2752 2753 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; 2754 2755 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 2756 host->flags |= SDHCI_AUTO_CMD12; 2757 2758 /* Auto-CMD23 stuff only works in ADMA or PIO. */ 2759 if ((host->version >= SDHCI_SPEC_300) && 2760 ((host->flags & SDHCI_USE_ADMA) || 2761 !(host->flags & SDHCI_USE_SDMA))) { 2762 host->flags |= SDHCI_AUTO_CMD23; 2763 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc)); 2764 } else { 2765 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc)); 2766 } 2767 2768 /* 2769 * A controller may support 8-bit width, but the board itself 2770 * might not have the pins brought out. Boards that support 2771 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 2772 * their platform code before calling sdhci_add_host(), and we 2773 * won't assume 8-bit width for hosts without that CAP. 2774 */ 2775 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 2776 mmc->caps |= MMC_CAP_4_BIT_DATA; 2777 2778 if (caps[0] & SDHCI_CAN_DO_HISPD) 2779 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 2780 2781 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 2782 mmc_card_is_removable(mmc)) 2783 mmc->caps |= MMC_CAP_NEEDS_POLL; 2784 2785 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ 2786 if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 2787 SDHCI_SUPPORT_DDR50)) 2788 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; 2789 2790 /* SDR104 supports also implies SDR50 support */ 2791 if (caps[1] & SDHCI_SUPPORT_SDR104) 2792 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; 2793 else if (caps[1] & SDHCI_SUPPORT_SDR50) 2794 mmc->caps |= MMC_CAP_UHS_SDR50; 2795 2796 if (caps[1] & SDHCI_SUPPORT_DDR50) 2797 mmc->caps |= MMC_CAP_UHS_DDR50; 2798 2799 /* Does the host need tuning for SDR50? */ 2800 if (caps[1] & SDHCI_USE_SDR50_TUNING) 2801 host->flags |= SDHCI_SDR50_NEEDS_TUNING; 2802 2803 /* Does the host need tuning for HS200? */ 2804 if (mmc->caps2 & MMC_CAP2_HS200) 2805 host->flags |= SDHCI_HS200_NEEDS_TUNING; 2806 2807 /* Driver Type(s) (A, C, D) supported by the host */ 2808 if (caps[1] & SDHCI_DRIVER_TYPE_A) 2809 mmc->caps |= MMC_CAP_DRIVER_TYPE_A; 2810 if (caps[1] & SDHCI_DRIVER_TYPE_C) 2811 mmc->caps |= MMC_CAP_DRIVER_TYPE_C; 2812 if (caps[1] & SDHCI_DRIVER_TYPE_D) 2813 mmc->caps |= MMC_CAP_DRIVER_TYPE_D; 2814 2815 /* 2816 * If Power Off Notify capability is enabled by the host, 2817 * set notify to short power off notify timeout value. 2818 */ 2819 if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY) 2820 mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT; 2821 else 2822 mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE; 2823 2824 /* Initial value for re-tuning timer count */ 2825 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >> 2826 SDHCI_RETUNING_TIMER_COUNT_SHIFT; 2827 2828 /* 2829 * In case Re-tuning Timer is not disabled, the actual value of 2830 * re-tuning timer will be 2 ^ (n - 1). 2831 */ 2832 if (host->tuning_count) 2833 host->tuning_count = 1 << (host->tuning_count - 1); 2834 2835 /* Re-tuning mode supported by the Host Controller */ 2836 host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >> 2837 SDHCI_RETUNING_MODE_SHIFT; 2838 2839 ocr_avail = 0; 2840 /* 2841 * According to SD Host Controller spec v3.00, if the Host System 2842 * can afford more than 150mA, Host Driver should set XPC to 1. Also 2843 * the value is meaningful only if Voltage Support in the Capabilities 2844 * register is set. The actual current value is 4 times the register 2845 * value. 2846 */ 2847 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); 2848 2849 if (caps[0] & SDHCI_CAN_VDD_330) { 2850 int max_current_330; 2851 2852 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; 2853 2854 max_current_330 = ((max_current_caps & 2855 SDHCI_MAX_CURRENT_330_MASK) >> 2856 SDHCI_MAX_CURRENT_330_SHIFT) * 2857 SDHCI_MAX_CURRENT_MULTIPLIER; 2858 2859 if (max_current_330 > 150) 2860 mmc->caps |= MMC_CAP_SET_XPC_330; 2861 } 2862 if (caps[0] & SDHCI_CAN_VDD_300) { 2863 int max_current_300; 2864 2865 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; 2866 2867 max_current_300 = ((max_current_caps & 2868 SDHCI_MAX_CURRENT_300_MASK) >> 2869 SDHCI_MAX_CURRENT_300_SHIFT) * 2870 SDHCI_MAX_CURRENT_MULTIPLIER; 2871 2872 if (max_current_300 > 150) 2873 mmc->caps |= MMC_CAP_SET_XPC_300; 2874 } 2875 if (caps[0] & SDHCI_CAN_VDD_180) { 2876 int max_current_180; 2877 2878 ocr_avail |= MMC_VDD_165_195; 2879 2880 max_current_180 = ((max_current_caps & 2881 SDHCI_MAX_CURRENT_180_MASK) >> 2882 SDHCI_MAX_CURRENT_180_SHIFT) * 2883 SDHCI_MAX_CURRENT_MULTIPLIER; 2884 2885 if (max_current_180 > 150) 2886 mmc->caps |= MMC_CAP_SET_XPC_180; 2887 2888 /* Maximum current capabilities of the host at 1.8V */ 2889 if (max_current_180 >= 800) 2890 mmc->caps |= MMC_CAP_MAX_CURRENT_800; 2891 else if (max_current_180 >= 600) 2892 mmc->caps |= MMC_CAP_MAX_CURRENT_600; 2893 else if (max_current_180 >= 400) 2894 mmc->caps |= MMC_CAP_MAX_CURRENT_400; 2895 else 2896 mmc->caps |= MMC_CAP_MAX_CURRENT_200; 2897 } 2898 2899 mmc->ocr_avail = ocr_avail; 2900 mmc->ocr_avail_sdio = ocr_avail; 2901 if (host->ocr_avail_sdio) 2902 mmc->ocr_avail_sdio &= host->ocr_avail_sdio; 2903 mmc->ocr_avail_sd = ocr_avail; 2904 if (host->ocr_avail_sd) 2905 mmc->ocr_avail_sd &= host->ocr_avail_sd; 2906 else /* normal SD controllers don't support 1.8V */ 2907 mmc->ocr_avail_sd &= ~MMC_VDD_165_195; 2908 mmc->ocr_avail_mmc = ocr_avail; 2909 if (host->ocr_avail_mmc) 2910 mmc->ocr_avail_mmc &= host->ocr_avail_mmc; 2911 2912 if (mmc->ocr_avail == 0) { 2913 pr_err("%s: Hardware doesn't report any " 2914 "support voltages.\n", mmc_hostname(mmc)); 2915 return -ENODEV; 2916 } 2917 2918 spin_lock_init(&host->lock); 2919 2920 /* 2921 * Maximum number of segments. Depends on if the hardware 2922 * can do scatter/gather or not. 2923 */ 2924 if (host->flags & SDHCI_USE_ADMA) 2925 mmc->max_segs = 128; 2926 else if (host->flags & SDHCI_USE_SDMA) 2927 mmc->max_segs = 1; 2928 else /* PIO */ 2929 mmc->max_segs = 128; 2930 2931 /* 2932 * Maximum number of sectors in one transfer. Limited by DMA boundary 2933 * size (512KiB). 2934 */ 2935 mmc->max_req_size = 524288; 2936 2937 /* 2938 * Maximum segment size. Could be one segment with the maximum number 2939 * of bytes. When doing hardware scatter/gather, each entry cannot 2940 * be larger than 64 KiB though. 2941 */ 2942 if (host->flags & SDHCI_USE_ADMA) { 2943 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) 2944 mmc->max_seg_size = 65535; 2945 else 2946 mmc->max_seg_size = 65536; 2947 } else { 2948 mmc->max_seg_size = mmc->max_req_size; 2949 } 2950 2951 /* 2952 * Maximum block size. This varies from controller to controller and 2953 * is specified in the capabilities register. 2954 */ 2955 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { 2956 mmc->max_blk_size = 2; 2957 } else { 2958 mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >> 2959 SDHCI_MAX_BLOCK_SHIFT; 2960 if (mmc->max_blk_size >= 3) { 2961 pr_warning("%s: Invalid maximum block size, " 2962 "assuming 512 bytes\n", mmc_hostname(mmc)); 2963 mmc->max_blk_size = 0; 2964 } 2965 } 2966 2967 mmc->max_blk_size = 512 << mmc->max_blk_size; 2968 2969 /* 2970 * Maximum block count. 2971 */ 2972 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; 2973 2974 /* 2975 * Init tasklets. 2976 */ 2977 tasklet_init(&host->card_tasklet, 2978 sdhci_tasklet_card, (unsigned long)host); 2979 tasklet_init(&host->finish_tasklet, 2980 sdhci_tasklet_finish, (unsigned long)host); 2981 2982 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host); 2983 2984 if (host->version >= SDHCI_SPEC_300) { 2985 init_waitqueue_head(&host->buf_ready_int); 2986 2987 /* Initialize re-tuning timer */ 2988 init_timer(&host->tuning_timer); 2989 host->tuning_timer.data = (unsigned long)host; 2990 host->tuning_timer.function = sdhci_tuning_timer; 2991 } 2992 2993 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, 2994 mmc_hostname(mmc), host); 2995 if (ret) 2996 goto untasklet; 2997 2998 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); 2999 if (IS_ERR(host->vmmc)) { 3000 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); 3001 host->vmmc = NULL; 3002 } 3003 3004 sdhci_init(host, 0); 3005 3006 #ifdef CONFIG_MMC_DEBUG 3007 sdhci_dumpregs(host); 3008 #endif 3009 3010 #ifdef SDHCI_USE_LEDS_CLASS 3011 snprintf(host->led_name, sizeof(host->led_name), 3012 "%s::", mmc_hostname(mmc)); 3013 host->led.name = host->led_name; 3014 host->led.brightness = LED_OFF; 3015 host->led.default_trigger = mmc_hostname(mmc); 3016 host->led.brightness_set = sdhci_led_control; 3017 3018 ret = led_classdev_register(mmc_dev(mmc), &host->led); 3019 if (ret) 3020 goto reset; 3021 #endif 3022 3023 mmiowb(); 3024 3025 mmc_add_host(mmc); 3026 3027 pr_info("%s: SDHCI controller on %s [%s] using %s\n", 3028 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), 3029 (host->flags & SDHCI_USE_ADMA) ? "ADMA" : 3030 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"); 3031 3032 sdhci_enable_card_detection(host); 3033 3034 return 0; 3035 3036 #ifdef SDHCI_USE_LEDS_CLASS 3037 reset: 3038 sdhci_reset(host, SDHCI_RESET_ALL); 3039 free_irq(host->irq, host); 3040 #endif 3041 untasklet: 3042 tasklet_kill(&host->card_tasklet); 3043 tasklet_kill(&host->finish_tasklet); 3044 3045 return ret; 3046 } 3047 3048 EXPORT_SYMBOL_GPL(sdhci_add_host); 3049 3050 void sdhci_remove_host(struct sdhci_host *host, int dead) 3051 { 3052 unsigned long flags; 3053 3054 if (dead) { 3055 spin_lock_irqsave(&host->lock, flags); 3056 3057 host->flags |= SDHCI_DEVICE_DEAD; 3058 3059 if (host->mrq) { 3060 pr_err("%s: Controller removed during " 3061 " transfer!\n", mmc_hostname(host->mmc)); 3062 3063 host->mrq->cmd->error = -ENOMEDIUM; 3064 tasklet_schedule(&host->finish_tasklet); 3065 } 3066 3067 spin_unlock_irqrestore(&host->lock, flags); 3068 } 3069 3070 sdhci_disable_card_detection(host); 3071 3072 mmc_remove_host(host->mmc); 3073 3074 #ifdef SDHCI_USE_LEDS_CLASS 3075 led_classdev_unregister(&host->led); 3076 #endif 3077 3078 if (!dead) 3079 sdhci_reset(host, SDHCI_RESET_ALL); 3080 3081 free_irq(host->irq, host); 3082 3083 del_timer_sync(&host->timer); 3084 if (host->version >= SDHCI_SPEC_300) 3085 del_timer_sync(&host->tuning_timer); 3086 3087 tasklet_kill(&host->card_tasklet); 3088 tasklet_kill(&host->finish_tasklet); 3089 3090 if (host->vmmc) 3091 regulator_put(host->vmmc); 3092 3093 kfree(host->adma_desc); 3094 kfree(host->align_buffer); 3095 3096 host->adma_desc = NULL; 3097 host->align_buffer = NULL; 3098 } 3099 3100 EXPORT_SYMBOL_GPL(sdhci_remove_host); 3101 3102 void sdhci_free_host(struct sdhci_host *host) 3103 { 3104 mmc_free_host(host->mmc); 3105 } 3106 3107 EXPORT_SYMBOL_GPL(sdhci_free_host); 3108 3109 /*****************************************************************************\ 3110 * * 3111 * Driver init/exit * 3112 * * 3113 \*****************************************************************************/ 3114 3115 static int __init sdhci_drv_init(void) 3116 { 3117 pr_info(DRIVER_NAME 3118 ": Secure Digital Host Controller Interface driver\n"); 3119 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 3120 3121 return 0; 3122 } 3123 3124 static void __exit sdhci_drv_exit(void) 3125 { 3126 } 3127 3128 module_init(sdhci_drv_init); 3129 module_exit(sdhci_drv_exit); 3130 3131 module_param(debug_quirks, uint, 0444); 3132 module_param(debug_quirks2, uint, 0444); 3133 3134 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 3135 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 3136 MODULE_LICENSE("GPL"); 3137 3138 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 3139 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 3140