/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("AC12 err: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_ACMD12_ERR),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}

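/*
 * Default interrupt mask used outside of card-detect and SDIO handling;
 * those bits are added separately by sdhci_set_card_detection() and
 * sdhci_enable_sdio_irq_nolock().
 */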
static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buffer,
					  length);
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(host->mmc->parent,
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
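		 * For example, with SDHCI_ADMA2_ALIGN == 4, a mapping whose
		 * (addr & SDHCI_ADMA2_MASK) == 2 gets offset == 2 below, so
		 * the first two bytes are carried via the align buffer and
		 * the remainder is described directly.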
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static u32 sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
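			 * For example, 1000 timeout clocks at a 50 MHz SDCLK
			 * add 1000000 * 1000 / 50000000 = 20 us here.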
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("Too large timeout 0x%x requested for CMD%d!\n",
		    count, cmd->opcode);
		count = 0xE;
	}

	return count;
}

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sdhci_sdma_address(host),
				     SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
		    (host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
		    SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
				(mrq->data->stop && mrq->data->stop->error))) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!host->cmd || host->cmd == data_cmd)
				sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
		       mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);

void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			/*
			 * Without a regulator, SDHCI does not support 2.0v
			 * so we only get here if the driver deliberately
			 * added the 2.0v range to ocr_avail. Map it to 1.8v
			 * for the purpose of turning on the power.
			 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10ms delay before
		 * they can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		if (ios->timing == MMC_TIMING_SD_HS ||
		    ios->timing == MMC_TIMING_MMC_HS ||
		    ios->timing == MMC_TIMING_MMC_HS400 ||
		    ios->timing == MMC_TIMING_MMC_HS200 ||
		    ios->timing == MMC_TIMING_MMC_DDR52 ||
		    ios->timing == MMC_TIMING_UHS_SDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR104 ||
		    ios->timing == MMC_TIMING_UHS_DDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot gpio detect; if defined it takes precedence
	 * over the built-in controller functionality
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

#define SAMPLE_COUNT	5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}

void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	if (enable)
		pm_runtime_get_noresume(host->mmc->parent);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	if (!enable)
		pm_runtime_put_noidle(host->mmc->parent);
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);

int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
				      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!(host->flags & SDHCI_SIGNALING_120))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);

static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[0] is 0 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_0_LVL_MASK);
}

static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

static void sdhci_start_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
2043 * 2044 * Note: The spec clearly says that when tuning sequence 2045 * is being performed, the controller does not generate 2046 * interrupts other than Buffer Read Ready interrupt. But 2047 * to make sure we don't hit a controller bug, we _only_ 2048 * enable Buffer Read Ready interrupt here. 2049 */ 2050 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 2051 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 2052 } 2053 2054 static void sdhci_end_tuning(struct sdhci_host *host) 2055 { 2056 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2057 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2058 } 2059 2060 static void sdhci_reset_tuning(struct sdhci_host *host) 2061 { 2062 u16 ctrl; 2063 2064 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2065 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2066 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2067 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2068 } 2069 2070 static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) 2071 { 2072 sdhci_reset_tuning(host); 2073 2074 sdhci_do_reset(host, SDHCI_RESET_CMD); 2075 sdhci_do_reset(host, SDHCI_RESET_DATA); 2076 2077 sdhci_end_tuning(host); 2078 2079 mmc_abort_tuning(host->mmc, opcode); 2080 } 2081 2082 /* 2083 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI 2084 * tuning command does not have a data payload (or rather the hardware does it 2085 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command 2086 * interrupt setup is different to other commands and there is no timeout 2087 * interrupt so special handling is needed. 2088 */ 2089 static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) 2090 { 2091 struct mmc_host *mmc = host->mmc; 2092 struct mmc_command cmd = {}; 2093 struct mmc_request mrq = {}; 2094 unsigned long flags; 2095 u32 b = host->sdma_boundary; 2096 2097 spin_lock_irqsave(&host->lock, flags); 2098 2099 cmd.opcode = opcode; 2100 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2101 cmd.mrq = &mrq; 2102 2103 mrq.cmd = &cmd; 2104 /* 2105 * In response to CMD19, the card sends 64 bytes of tuning 2106 * block to the Host Controller. So we set the block size 2107 * to 64 here. 2108 */ 2109 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && 2110 mmc->ios.bus_width == MMC_BUS_WIDTH_8) 2111 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); 2112 else 2113 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); 2114 2115 /* 2116 * The tuning block is sent by the card to the host controller. 2117 * So we set the TRNS_READ bit in the Transfer Mode register. 2118 * This also takes care of setting DMA Enable and Multi Block 2119 * Select in the same register to 0. 2120 */ 2121 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 2122 2123 sdhci_send_command(host, &cmd); 2124 2125 host->cmd = NULL; 2126 2127 sdhci_del_timer(host, &mrq); 2128 2129 host->tuning_done = 0; 2130 2131 mmiowb(); 2132 spin_unlock_irqrestore(&host->lock, flags); 2133 2134 /* Wait for Buffer Read Ready interrupt */ 2135 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2136 msecs_to_jiffies(50)); 2137 2138 } 2139 2140 static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2141 { 2142 int i; 2143 2144 /* 2145 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number 2146 * of loops reaches 40 times. 
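 *
 * After each tuning block the two relevant Host Control 2 bits are read
 * back and interpreted as follows (this just restates the checks in the
 * loop body below):
 *
 *	EXEC_TUNING  TUNED_CLK   meaning
 *	     1           x       controller wants another tuning block
 *	     0           1       tuning succeeded, sampling clock is locked
 *	     0           0       controller gave up; reset tuning and fall
 *	                         back to the fixed sampling clock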
2147 */ 2148 for (i = 0; i < MAX_TUNING_LOOP; i++) { 2149 u16 ctrl; 2150 2151 sdhci_send_tuning(host, opcode); 2152 2153 if (!host->tuning_done) { 2154 pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n", 2155 mmc_hostname(host->mmc)); 2156 sdhci_abort_tuning(host, opcode); 2157 return; 2158 } 2159 2160 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2161 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { 2162 if (ctrl & SDHCI_CTRL_TUNED_CLK) 2163 return; /* Success! */ 2164 break; 2165 } 2166 2167 /* Spec does not require a delay between tuning cycles */ 2168 if (host->tuning_delay > 0) 2169 mdelay(host->tuning_delay); 2170 } 2171 2172 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2173 mmc_hostname(host->mmc)); 2174 sdhci_reset_tuning(host); 2175 } 2176 2177 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2178 { 2179 struct sdhci_host *host = mmc_priv(mmc); 2180 int err = 0; 2181 unsigned int tuning_count = 0; 2182 bool hs400_tuning; 2183 2184 hs400_tuning = host->flags & SDHCI_HS400_TUNING; 2185 2186 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 2187 tuning_count = host->tuning_count; 2188 2189 /* 2190 * The Host Controller needs tuning in case of SDR104 and DDR50 2191 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in 2192 * the Capabilities register. 2193 * If the Host Controller supports the HS200 mode then the 2194 * tuning function has to be executed. 2195 */ 2196 switch (host->timing) { 2197 /* HS400 tuning is done in HS200 mode */ 2198 case MMC_TIMING_MMC_HS400: 2199 err = -EINVAL; 2200 goto out; 2201 2202 case MMC_TIMING_MMC_HS200: 2203 /* 2204 * Periodic re-tuning for HS400 is not expected to be needed, so 2205 * disable it here. 2206 */ 2207 if (hs400_tuning) 2208 tuning_count = 0; 2209 break; 2210 2211 case MMC_TIMING_UHS_SDR104: 2212 case MMC_TIMING_UHS_DDR50: 2213 break; 2214 2215 case MMC_TIMING_UHS_SDR50: 2216 if (host->flags & SDHCI_SDR50_NEEDS_TUNING) 2217 break; 2218 /* FALLTHROUGH */ 2219 2220 default: 2221 goto out; 2222 } 2223 2224 if (host->ops->platform_execute_tuning) { 2225 err = host->ops->platform_execute_tuning(host, opcode); 2226 goto out; 2227 } 2228 2229 host->mmc->retune_period = tuning_count; 2230 2231 if (host->tuning_delay < 0) 2232 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; 2233 2234 sdhci_start_tuning(host); 2235 2236 __sdhci_execute_tuning(host, opcode); 2237 2238 sdhci_end_tuning(host); 2239 out: 2240 host->flags &= ~SDHCI_HS400_TUNING; 2241 2242 return err; 2243 } 2244 EXPORT_SYMBOL_GPL(sdhci_execute_tuning); 2245 2246 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2247 { 2248 /* Host Controller v3.00 defines preset value registers */ 2249 if (host->version < SDHCI_SPEC_300) 2250 return; 2251 2252 /* 2253 * We only enable or disable Preset Value if they are not already 2254 * enabled or disabled respectively. Otherwise, we bail out. 
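 *
 * For reference, sdhci_runtime_resume_host() later in this file
 * re-enables presets with this helper (under host->lock) once the I/O
 * state has been re-programmed after a runtime suspend:
 *
 *	if ((host_flags & SDHCI_PV_ENABLED) &&
 *	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
 *		spin_lock_irqsave(&host->lock, flags);
 *		sdhci_enable_preset_value(host, true);
 *		spin_unlock_irqrestore(&host->lock, flags);
 *	}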
2255 */ 2256 if (host->preset_enabled != enable) { 2257 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2258 2259 if (enable) 2260 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2261 else 2262 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2263 2264 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2265 2266 if (enable) 2267 host->flags |= SDHCI_PV_ENABLED; 2268 else 2269 host->flags &= ~SDHCI_PV_ENABLED; 2270 2271 host->preset_enabled = enable; 2272 } 2273 } 2274 2275 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 2276 int err) 2277 { 2278 struct sdhci_host *host = mmc_priv(mmc); 2279 struct mmc_data *data = mrq->data; 2280 2281 if (data->host_cookie != COOKIE_UNMAPPED) 2282 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 2283 mmc_get_dma_dir(data)); 2284 2285 data->host_cookie = COOKIE_UNMAPPED; 2286 } 2287 2288 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 2289 { 2290 struct sdhci_host *host = mmc_priv(mmc); 2291 2292 mrq->data->host_cookie = COOKIE_UNMAPPED; 2293 2294 /* 2295 * No pre-mapping in the pre hook if we're using the bounce buffer, 2296 * for that we would need two bounce buffers since one buffer is 2297 * in flight when this is getting called. 2298 */ 2299 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) 2300 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 2301 } 2302 2303 static inline bool sdhci_has_requests(struct sdhci_host *host) 2304 { 2305 return host->cmd || host->data_cmd; 2306 } 2307 2308 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) 2309 { 2310 if (host->data_cmd) { 2311 host->data_cmd->error = err; 2312 sdhci_finish_mrq(host, host->data_cmd->mrq); 2313 } 2314 2315 if (host->cmd) { 2316 host->cmd->error = err; 2317 sdhci_finish_mrq(host, host->cmd->mrq); 2318 } 2319 } 2320 2321 static void sdhci_card_event(struct mmc_host *mmc) 2322 { 2323 struct sdhci_host *host = mmc_priv(mmc); 2324 unsigned long flags; 2325 int present; 2326 2327 /* First check if client has provided their own card event */ 2328 if (host->ops->card_event) 2329 host->ops->card_event(host); 2330 2331 present = mmc->ops->get_cd(mmc); 2332 2333 spin_lock_irqsave(&host->lock, flags); 2334 2335 /* Check sdhci_has_requests() first in case we are runtime suspended */ 2336 if (sdhci_has_requests(host) && !present) { 2337 pr_err("%s: Card removed during transfer!\n", 2338 mmc_hostname(host->mmc)); 2339 pr_err("%s: Resetting controller.\n", 2340 mmc_hostname(host->mmc)); 2341 2342 sdhci_do_reset(host, SDHCI_RESET_CMD); 2343 sdhci_do_reset(host, SDHCI_RESET_DATA); 2344 2345 sdhci_error_out_mrqs(host, -ENOMEDIUM); 2346 } 2347 2348 spin_unlock_irqrestore(&host->lock, flags); 2349 } 2350 2351 static const struct mmc_host_ops sdhci_ops = { 2352 .request = sdhci_request, 2353 .post_req = sdhci_post_req, 2354 .pre_req = sdhci_pre_req, 2355 .set_ios = sdhci_set_ios, 2356 .get_cd = sdhci_get_cd, 2357 .get_ro = sdhci_get_ro, 2358 .hw_reset = sdhci_hw_reset, 2359 .enable_sdio_irq = sdhci_enable_sdio_irq, 2360 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 2361 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 2362 .execute_tuning = sdhci_execute_tuning, 2363 .card_event = sdhci_card_event, 2364 .card_busy = sdhci_card_busy, 2365 }; 2366 2367 /*****************************************************************************\ 2368 * * 2369 * Tasklets * 2370 * * 2371 \*****************************************************************************/ 2372 2373 static bool sdhci_request_done(struct sdhci_host *host) 
2374 { 2375 unsigned long flags; 2376 struct mmc_request *mrq; 2377 int i; 2378 2379 spin_lock_irqsave(&host->lock, flags); 2380 2381 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 2382 mrq = host->mrqs_done[i]; 2383 if (mrq) 2384 break; 2385 } 2386 2387 if (!mrq) { 2388 spin_unlock_irqrestore(&host->lock, flags); 2389 return true; 2390 } 2391 2392 sdhci_del_timer(host, mrq); 2393 2394 /* 2395 * Always unmap the data buffers if they were mapped by 2396 * sdhci_prepare_data() whenever we finish with a request. 2397 * This avoids leaking DMA mappings on error. 2398 */ 2399 if (host->flags & SDHCI_REQ_USE_DMA) { 2400 struct mmc_data *data = mrq->data; 2401 2402 if (data && data->host_cookie == COOKIE_MAPPED) { 2403 if (host->bounce_buffer) { 2404 /* 2405 * On reads, copy the bounced data into the 2406 * sglist 2407 */ 2408 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 2409 unsigned int length = data->bytes_xfered; 2410 2411 if (length > host->bounce_buffer_size) { 2412 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 2413 mmc_hostname(host->mmc), 2414 host->bounce_buffer_size, 2415 data->bytes_xfered); 2416 /* Cap it down and continue */ 2417 length = host->bounce_buffer_size; 2418 } 2419 dma_sync_single_for_cpu( 2420 host->mmc->parent, 2421 host->bounce_addr, 2422 host->bounce_buffer_size, 2423 DMA_FROM_DEVICE); 2424 sg_copy_from_buffer(data->sg, 2425 data->sg_len, 2426 host->bounce_buffer, 2427 length); 2428 } else { 2429 /* No copying, just switch ownership */ 2430 dma_sync_single_for_cpu( 2431 host->mmc->parent, 2432 host->bounce_addr, 2433 host->bounce_buffer_size, 2434 mmc_get_dma_dir(data)); 2435 } 2436 } else { 2437 /* Unmap the raw data */ 2438 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 2439 data->sg_len, 2440 mmc_get_dma_dir(data)); 2441 } 2442 data->host_cookie = COOKIE_UNMAPPED; 2443 } 2444 } 2445 2446 /* 2447 * The controller needs a reset of internal state machines 2448 * upon error conditions. 2449 */ 2450 if (sdhci_needs_reset(host, mrq)) { 2451 /* 2452 * Do not finish until command and data lines are available for 2453 * reset. Note there can only be one other mrq, so it cannot 2454 * also be in mrqs_done, otherwise host->cmd and host->data_cmd 2455 * would both be null. 2456 */ 2457 if (host->cmd || host->data_cmd) { 2458 spin_unlock_irqrestore(&host->lock, flags); 2459 return true; 2460 } 2461 2462 /* Some controllers need this kick or reset won't work here */ 2463 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 2464 /* This is to force an update */ 2465 host->ops->set_clock(host, host->clock); 2466 2467 /* Spec says we should do both at the same time, but Ricoh 2468 controllers do not like that. 
*/ 2469 sdhci_do_reset(host, SDHCI_RESET_CMD); 2470 sdhci_do_reset(host, SDHCI_RESET_DATA); 2471 2472 host->pending_reset = false; 2473 } 2474 2475 if (!sdhci_has_requests(host)) 2476 sdhci_led_deactivate(host); 2477 2478 host->mrqs_done[i] = NULL; 2479 2480 mmiowb(); 2481 spin_unlock_irqrestore(&host->lock, flags); 2482 2483 mmc_request_done(host->mmc, mrq); 2484 2485 return false; 2486 } 2487 2488 static void sdhci_tasklet_finish(unsigned long param) 2489 { 2490 struct sdhci_host *host = (struct sdhci_host *)param; 2491 2492 while (!sdhci_request_done(host)) 2493 ; 2494 } 2495 2496 static void sdhci_timeout_timer(struct timer_list *t) 2497 { 2498 struct sdhci_host *host; 2499 unsigned long flags; 2500 2501 host = from_timer(host, t, timer); 2502 2503 spin_lock_irqsave(&host->lock, flags); 2504 2505 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { 2506 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", 2507 mmc_hostname(host->mmc)); 2508 sdhci_dumpregs(host); 2509 2510 host->cmd->error = -ETIMEDOUT; 2511 sdhci_finish_mrq(host, host->cmd->mrq); 2512 } 2513 2514 mmiowb(); 2515 spin_unlock_irqrestore(&host->lock, flags); 2516 } 2517 2518 static void sdhci_timeout_data_timer(struct timer_list *t) 2519 { 2520 struct sdhci_host *host; 2521 unsigned long flags; 2522 2523 host = from_timer(host, t, data_timer); 2524 2525 spin_lock_irqsave(&host->lock, flags); 2526 2527 if (host->data || host->data_cmd || 2528 (host->cmd && sdhci_data_line_cmd(host->cmd))) { 2529 pr_err("%s: Timeout waiting for hardware interrupt.\n", 2530 mmc_hostname(host->mmc)); 2531 sdhci_dumpregs(host); 2532 2533 if (host->data) { 2534 host->data->error = -ETIMEDOUT; 2535 sdhci_finish_data(host); 2536 } else if (host->data_cmd) { 2537 host->data_cmd->error = -ETIMEDOUT; 2538 sdhci_finish_mrq(host, host->data_cmd->mrq); 2539 } else { 2540 host->cmd->error = -ETIMEDOUT; 2541 sdhci_finish_mrq(host, host->cmd->mrq); 2542 } 2543 } 2544 2545 mmiowb(); 2546 spin_unlock_irqrestore(&host->lock, flags); 2547 } 2548 2549 /*****************************************************************************\ 2550 * * 2551 * Interrupt handling * 2552 * * 2553 \*****************************************************************************/ 2554 2555 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask) 2556 { 2557 if (!host->cmd) { 2558 /* 2559 * SDHCI recovers from errors by resetting the cmd and data 2560 * circuits. Until that is done, there very well might be more 2561 * interrupts, so ignore them in that case. 2562 */ 2563 if (host->pending_reset) 2564 return; 2565 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 2566 mmc_hostname(host->mmc), (unsigned)intmask); 2567 sdhci_dumpregs(host); 2568 return; 2569 } 2570 2571 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 2572 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 2573 if (intmask & SDHCI_INT_TIMEOUT) 2574 host->cmd->error = -ETIMEDOUT; 2575 else 2576 host->cmd->error = -EILSEQ; 2577 2578 /* 2579 * If this command initiates a data phase and a response 2580 * CRC error is signalled, the card can start transferring 2581 * data - the card may have received the command without 2582 * error. We must not terminate the mmc_request early. 2583 * 2584 * If the card did not receive the command or returned an 2585 * error which prevented it sending data, the data phase 2586 * will time out. 
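 *
 * Concretely, the mask test below keeps the request alive only when a
 * CRC error is reported without a timeout:
 *
 *	intmask = SDHCI_INT_CRC                      -> defer, wait for data
 *	intmask = SDHCI_INT_CRC | SDHCI_INT_TIMEOUT  -> finish the mrq now
 *	intmask = SDHCI_INT_TIMEOUT                  -> finish the mrq now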
2587 */ 2588 if (host->cmd->data && 2589 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 2590 SDHCI_INT_CRC) { 2591 host->cmd = NULL; 2592 return; 2593 } 2594 2595 sdhci_finish_mrq(host, host->cmd->mrq); 2596 return; 2597 } 2598 2599 if (intmask & SDHCI_INT_RESPONSE) 2600 sdhci_finish_command(host); 2601 } 2602 2603 static void sdhci_adma_show_error(struct sdhci_host *host) 2604 { 2605 void *desc = host->adma_table; 2606 2607 sdhci_dumpregs(host); 2608 2609 while (true) { 2610 struct sdhci_adma2_64_desc *dma_desc = desc; 2611 2612 if (host->flags & SDHCI_USE_64_BIT_DMA) 2613 DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 2614 desc, le32_to_cpu(dma_desc->addr_hi), 2615 le32_to_cpu(dma_desc->addr_lo), 2616 le16_to_cpu(dma_desc->len), 2617 le16_to_cpu(dma_desc->cmd)); 2618 else 2619 DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 2620 desc, le32_to_cpu(dma_desc->addr_lo), 2621 le16_to_cpu(dma_desc->len), 2622 le16_to_cpu(dma_desc->cmd)); 2623 2624 desc += host->desc_sz; 2625 2626 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 2627 break; 2628 } 2629 } 2630 2631 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 2632 { 2633 u32 command; 2634 2635 /* CMD19 generates _only_ Buffer Read Ready interrupt */ 2636 if (intmask & SDHCI_INT_DATA_AVAIL) { 2637 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); 2638 if (command == MMC_SEND_TUNING_BLOCK || 2639 command == MMC_SEND_TUNING_BLOCK_HS200) { 2640 host->tuning_done = 1; 2641 wake_up(&host->buf_ready_int); 2642 return; 2643 } 2644 } 2645 2646 if (!host->data) { 2647 struct mmc_command *data_cmd = host->data_cmd; 2648 2649 /* 2650 * The "data complete" interrupt is also used to 2651 * indicate that a busy state has ended. See comment 2652 * above in sdhci_cmd_irq(). 2653 */ 2654 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 2655 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 2656 host->data_cmd = NULL; 2657 data_cmd->error = -ETIMEDOUT; 2658 sdhci_finish_mrq(host, data_cmd->mrq); 2659 return; 2660 } 2661 if (intmask & SDHCI_INT_DATA_END) { 2662 host->data_cmd = NULL; 2663 /* 2664 * Some cards handle busy-end interrupt 2665 * before the command completed, so make 2666 * sure we do things in the proper order. 2667 */ 2668 if (host->cmd == data_cmd) 2669 return; 2670 2671 sdhci_finish_mrq(host, data_cmd->mrq); 2672 return; 2673 } 2674 } 2675 2676 /* 2677 * SDHCI recovers from errors by resetting the cmd and data 2678 * circuits. Until that is done, there very well might be more 2679 * interrupts, so ignore them in that case. 
2680 */ 2681 if (host->pending_reset) 2682 return; 2683 2684 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 2685 mmc_hostname(host->mmc), (unsigned)intmask); 2686 sdhci_dumpregs(host); 2687 2688 return; 2689 } 2690 2691 if (intmask & SDHCI_INT_DATA_TIMEOUT) 2692 host->data->error = -ETIMEDOUT; 2693 else if (intmask & SDHCI_INT_DATA_END_BIT) 2694 host->data->error = -EILSEQ; 2695 else if ((intmask & SDHCI_INT_DATA_CRC) && 2696 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 2697 != MMC_BUS_TEST_R) 2698 host->data->error = -EILSEQ; 2699 else if (intmask & SDHCI_INT_ADMA_ERROR) { 2700 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); 2701 sdhci_adma_show_error(host); 2702 host->data->error = -EIO; 2703 if (host->ops->adma_workaround) 2704 host->ops->adma_workaround(host, intmask); 2705 } 2706 2707 if (host->data->error) 2708 sdhci_finish_data(host); 2709 else { 2710 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 2711 sdhci_transfer_pio(host); 2712 2713 /* 2714 * We currently don't do anything fancy with DMA 2715 * boundaries, but as we can't disable the feature 2716 * we need to at least restart the transfer. 2717 * 2718 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 2719 * should return a valid address to continue from, but as 2720 * some controllers are faulty, don't trust them. 2721 */ 2722 if (intmask & SDHCI_INT_DMA_END) { 2723 u32 dmastart, dmanow; 2724 2725 dmastart = sdhci_sdma_address(host); 2726 dmanow = dmastart + host->data->bytes_xfered; 2727 /* 2728 * Force update to the next DMA block boundary. 2729 */ 2730 dmanow = (dmanow & 2731 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 2732 SDHCI_DEFAULT_BOUNDARY_SIZE; 2733 host->data->bytes_xfered = dmanow - dmastart; 2734 DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n", 2735 dmastart, host->data->bytes_xfered, dmanow); 2736 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); 2737 } 2738 2739 if (intmask & SDHCI_INT_DATA_END) { 2740 if (host->cmd == host->data_cmd) { 2741 /* 2742 * Data managed to finish before the 2743 * command completed. Make sure we do 2744 * things in the proper order. 2745 */ 2746 host->data_early = 1; 2747 } else { 2748 sdhci_finish_data(host); 2749 } 2750 } 2751 } 2752 } 2753 2754 static irqreturn_t sdhci_irq(int irq, void *dev_id) 2755 { 2756 irqreturn_t result = IRQ_NONE; 2757 struct sdhci_host *host = dev_id; 2758 u32 intmask, mask, unexpected = 0; 2759 int max_loops = 16; 2760 2761 spin_lock(&host->lock); 2762 2763 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) { 2764 spin_unlock(&host->lock); 2765 return IRQ_NONE; 2766 } 2767 2768 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 2769 if (!intmask || intmask == 0xffffffff) { 2770 result = IRQ_NONE; 2771 goto out; 2772 } 2773 2774 do { 2775 DBG("IRQ status 0x%08x\n", intmask); 2776 2777 if (host->ops->irq) { 2778 intmask = host->ops->irq(host, intmask); 2779 if (!intmask) 2780 goto cont; 2781 } 2782 2783 /* Clear selected interrupts. */ 2784 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 2785 SDHCI_INT_BUS_POWER); 2786 sdhci_writel(host, mask, SDHCI_INT_STATUS); 2787 2788 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 2789 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 2790 SDHCI_CARD_PRESENT; 2791 2792 /* 2793 * There is a observation on i.mx esdhc. INSERT 2794 * bit will be immediately set again when it gets 2795 * cleared, if a card is inserted. 
We have to mask 2796 * the irq to prevent interrupt storm which will 2797 * freeze the system. And the REMOVE gets the 2798 * same situation. 2799 * 2800 * More testing are needed here to ensure it works 2801 * for other platforms though. 2802 */ 2803 host->ier &= ~(SDHCI_INT_CARD_INSERT | 2804 SDHCI_INT_CARD_REMOVE); 2805 host->ier |= present ? SDHCI_INT_CARD_REMOVE : 2806 SDHCI_INT_CARD_INSERT; 2807 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2808 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2809 2810 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 2811 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 2812 2813 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 2814 SDHCI_INT_CARD_REMOVE); 2815 result = IRQ_WAKE_THREAD; 2816 } 2817 2818 if (intmask & SDHCI_INT_CMD_MASK) 2819 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); 2820 2821 if (intmask & SDHCI_INT_DATA_MASK) 2822 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 2823 2824 if (intmask & SDHCI_INT_BUS_POWER) 2825 pr_err("%s: Card is consuming too much power!\n", 2826 mmc_hostname(host->mmc)); 2827 2828 if (intmask & SDHCI_INT_RETUNE) 2829 mmc_retune_needed(host->mmc); 2830 2831 if ((intmask & SDHCI_INT_CARD_INT) && 2832 (host->ier & SDHCI_INT_CARD_INT)) { 2833 sdhci_enable_sdio_irq_nolock(host, false); 2834 host->thread_isr |= SDHCI_INT_CARD_INT; 2835 result = IRQ_WAKE_THREAD; 2836 } 2837 2838 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 2839 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 2840 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 2841 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); 2842 2843 if (intmask) { 2844 unexpected |= intmask; 2845 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 2846 } 2847 cont: 2848 if (result == IRQ_NONE) 2849 result = IRQ_HANDLED; 2850 2851 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 2852 } while (intmask && --max_loops); 2853 out: 2854 spin_unlock(&host->lock); 2855 2856 if (unexpected) { 2857 pr_err("%s: Unexpected interrupt 0x%08x.\n", 2858 mmc_hostname(host->mmc), unexpected); 2859 sdhci_dumpregs(host); 2860 } 2861 2862 return result; 2863 } 2864 2865 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 2866 { 2867 struct sdhci_host *host = dev_id; 2868 unsigned long flags; 2869 u32 isr; 2870 2871 spin_lock_irqsave(&host->lock, flags); 2872 isr = host->thread_isr; 2873 host->thread_isr = 0; 2874 spin_unlock_irqrestore(&host->lock, flags); 2875 2876 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 2877 struct mmc_host *mmc = host->mmc; 2878 2879 mmc->ops->card_event(mmc); 2880 mmc_detect_change(mmc, msecs_to_jiffies(200)); 2881 } 2882 2883 if (isr & SDHCI_INT_CARD_INT) { 2884 sdio_run_irqs(host->mmc); 2885 2886 spin_lock_irqsave(&host->lock, flags); 2887 if (host->flags & SDHCI_SDIO_IRQ_ENABLED) 2888 sdhci_enable_sdio_irq_nolock(host, true); 2889 spin_unlock_irqrestore(&host->lock, flags); 2890 } 2891 2892 return isr ? IRQ_HANDLED : IRQ_NONE; 2893 } 2894 2895 /*****************************************************************************\ 2896 * * 2897 * Suspend/resume * 2898 * * 2899 \*****************************************************************************/ 2900 2901 #ifdef CONFIG_PM 2902 /* 2903 * To enable wakeup events, the corresponding events have to be enabled in 2904 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal 2905 * Table' in the SD Host Controller Standard Specification. 
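 *
 * Wake-up is only armed if the controller device has been marked as
 * wakeup capable, so that device_may_wakeup() returns true in
 * sdhci_suspend_host() below.  Illustrative sysfs example only (the
 * exact device path depends on the platform):
 *
 *	echo enabled > /sys/devices/.../power/wakeup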
2906 * It is useless to restore SDHCI_INT_ENABLE state in 2907 * sdhci_disable_irq_wakeups() since it will be set by 2908 * sdhci_enable_card_detection() or sdhci_init(). 2909 */ 2910 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) 2911 { 2912 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | 2913 SDHCI_WAKE_ON_INT; 2914 u32 irq_val = 0; 2915 u8 wake_val = 0; 2916 u8 val; 2917 2918 if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) { 2919 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; 2920 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; 2921 } 2922 2923 wake_val |= SDHCI_WAKE_ON_INT; 2924 irq_val |= SDHCI_INT_CARD_INT; 2925 2926 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 2927 val &= ~mask; 2928 val |= wake_val; 2929 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 2930 2931 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); 2932 2933 host->irq_wake_enabled = !enable_irq_wake(host->irq); 2934 2935 return host->irq_wake_enabled; 2936 } 2937 2938 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 2939 { 2940 u8 val; 2941 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 2942 | SDHCI_WAKE_ON_INT; 2943 2944 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 2945 val &= ~mask; 2946 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 2947 2948 disable_irq_wake(host->irq); 2949 2950 host->irq_wake_enabled = false; 2951 } 2952 2953 int sdhci_suspend_host(struct sdhci_host *host) 2954 { 2955 sdhci_disable_card_detection(host); 2956 2957 mmc_retune_timer_stop(host->mmc); 2958 2959 if (!device_may_wakeup(mmc_dev(host->mmc)) || 2960 !sdhci_enable_irq_wakeups(host)) { 2961 host->ier = 0; 2962 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 2963 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 2964 free_irq(host->irq, host); 2965 } 2966 2967 return 0; 2968 } 2969 2970 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 2971 2972 int sdhci_resume_host(struct sdhci_host *host) 2973 { 2974 struct mmc_host *mmc = host->mmc; 2975 int ret = 0; 2976 2977 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 2978 if (host->ops->enable_dma) 2979 host->ops->enable_dma(host); 2980 } 2981 2982 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && 2983 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 2984 /* Card keeps power but host controller does not */ 2985 sdhci_init(host, 0); 2986 host->pwr = 0; 2987 host->clock = 0; 2988 mmc->ops->set_ios(mmc, &mmc->ios); 2989 } else { 2990 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); 2991 mmiowb(); 2992 } 2993 2994 if (host->irq_wake_enabled) { 2995 sdhci_disable_irq_wakeups(host); 2996 } else { 2997 ret = request_threaded_irq(host->irq, sdhci_irq, 2998 sdhci_thread_irq, IRQF_SHARED, 2999 mmc_hostname(host->mmc), host); 3000 if (ret) 3001 return ret; 3002 } 3003 3004 sdhci_enable_card_detection(host); 3005 3006 return ret; 3007 } 3008 3009 EXPORT_SYMBOL_GPL(sdhci_resume_host); 3010 3011 int sdhci_runtime_suspend_host(struct sdhci_host *host) 3012 { 3013 unsigned long flags; 3014 3015 mmc_retune_timer_stop(host->mmc); 3016 3017 spin_lock_irqsave(&host->lock, flags); 3018 host->ier &= SDHCI_INT_CARD_INT; 3019 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3020 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3021 spin_unlock_irqrestore(&host->lock, flags); 3022 3023 synchronize_hardirq(host->irq); 3024 3025 spin_lock_irqsave(&host->lock, flags); 3026 host->runtime_suspended = true; 3027 spin_unlock_irqrestore(&host->lock, flags); 3028 3029 return 0; 3030 } 3031 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 3032 3033 int 
sdhci_runtime_resume_host(struct sdhci_host *host) 3034 { 3035 struct mmc_host *mmc = host->mmc; 3036 unsigned long flags; 3037 int host_flags = host->flags; 3038 3039 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3040 if (host->ops->enable_dma) 3041 host->ops->enable_dma(host); 3042 } 3043 3044 sdhci_init(host, 0); 3045 3046 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && 3047 mmc->ios.power_mode != MMC_POWER_OFF) { 3048 /* Force clock and power re-program */ 3049 host->pwr = 0; 3050 host->clock = 0; 3051 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); 3052 mmc->ops->set_ios(mmc, &mmc->ios); 3053 3054 if ((host_flags & SDHCI_PV_ENABLED) && 3055 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { 3056 spin_lock_irqsave(&host->lock, flags); 3057 sdhci_enable_preset_value(host, true); 3058 spin_unlock_irqrestore(&host->lock, flags); 3059 } 3060 3061 if ((mmc->caps2 & MMC_CAP2_HS400_ES) && 3062 mmc->ops->hs400_enhanced_strobe) 3063 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); 3064 } 3065 3066 spin_lock_irqsave(&host->lock, flags); 3067 3068 host->runtime_suspended = false; 3069 3070 /* Enable SDIO IRQ */ 3071 if (host->flags & SDHCI_SDIO_IRQ_ENABLED) 3072 sdhci_enable_sdio_irq_nolock(host, true); 3073 3074 /* Enable Card Detection */ 3075 sdhci_enable_card_detection(host); 3076 3077 spin_unlock_irqrestore(&host->lock, flags); 3078 3079 return 0; 3080 } 3081 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 3082 3083 #endif /* CONFIG_PM */ 3084 3085 /*****************************************************************************\ 3086 * * 3087 * Command Queue Engine (CQE) helpers * 3088 * * 3089 \*****************************************************************************/ 3090 3091 void sdhci_cqe_enable(struct mmc_host *mmc) 3092 { 3093 struct sdhci_host *host = mmc_priv(mmc); 3094 unsigned long flags; 3095 u8 ctrl; 3096 3097 spin_lock_irqsave(&host->lock, flags); 3098 3099 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 3100 ctrl &= ~SDHCI_CTRL_DMA_MASK; 3101 if (host->flags & SDHCI_USE_64_BIT_DMA) 3102 ctrl |= SDHCI_CTRL_ADMA64; 3103 else 3104 ctrl |= SDHCI_CTRL_ADMA32; 3105 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 3106 3107 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), 3108 SDHCI_BLOCK_SIZE); 3109 3110 /* Set maximum timeout */ 3111 sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL); 3112 3113 host->ier = host->cqe_ier; 3114 3115 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3116 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3117 3118 host->cqe_on = true; 3119 3120 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", 3121 mmc_hostname(mmc), host->ier, 3122 sdhci_readl(host, SDHCI_INT_STATUS)); 3123 3124 mmiowb(); 3125 spin_unlock_irqrestore(&host->lock, flags); 3126 } 3127 EXPORT_SYMBOL_GPL(sdhci_cqe_enable); 3128 3129 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) 3130 { 3131 struct sdhci_host *host = mmc_priv(mmc); 3132 unsigned long flags; 3133 3134 spin_lock_irqsave(&host->lock, flags); 3135 3136 sdhci_set_default_irqs(host); 3137 3138 host->cqe_on = false; 3139 3140 if (recovery) { 3141 sdhci_do_reset(host, SDHCI_RESET_CMD); 3142 sdhci_do_reset(host, SDHCI_RESET_DATA); 3143 } 3144 3145 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", 3146 mmc_hostname(mmc), host->ier, 3147 sdhci_readl(host, SDHCI_INT_STATUS)); 3148 3149 mmiowb(); 3150 spin_unlock_irqrestore(&host->lock, flags); 3151 } 3152 EXPORT_SYMBOL_GPL(sdhci_cqe_disable); 3153 3154 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int 
*cmd_error, 3155 int *data_error) 3156 { 3157 u32 mask; 3158 3159 if (!host->cqe_on) 3160 return false; 3161 3162 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) 3163 *cmd_error = -EILSEQ; 3164 else if (intmask & SDHCI_INT_TIMEOUT) 3165 *cmd_error = -ETIMEDOUT; 3166 else 3167 *cmd_error = 0; 3168 3169 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) 3170 *data_error = -EILSEQ; 3171 else if (intmask & SDHCI_INT_DATA_TIMEOUT) 3172 *data_error = -ETIMEDOUT; 3173 else if (intmask & SDHCI_INT_ADMA_ERROR) 3174 *data_error = -EIO; 3175 else 3176 *data_error = 0; 3177 3178 /* Clear selected interrupts. */ 3179 mask = intmask & host->cqe_ier; 3180 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3181 3182 if (intmask & SDHCI_INT_BUS_POWER) 3183 pr_err("%s: Card is consuming too much power!\n", 3184 mmc_hostname(host->mmc)); 3185 3186 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR); 3187 if (intmask) { 3188 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3189 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n", 3190 mmc_hostname(host->mmc), intmask); 3191 sdhci_dumpregs(host); 3192 } 3193 3194 return true; 3195 } 3196 EXPORT_SYMBOL_GPL(sdhci_cqe_irq); 3197 3198 /*****************************************************************************\ 3199 * * 3200 * Device allocation/registration * 3201 * * 3202 \*****************************************************************************/ 3203 3204 struct sdhci_host *sdhci_alloc_host(struct device *dev, 3205 size_t priv_size) 3206 { 3207 struct mmc_host *mmc; 3208 struct sdhci_host *host; 3209 3210 WARN_ON(dev == NULL); 3211 3212 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 3213 if (!mmc) 3214 return ERR_PTR(-ENOMEM); 3215 3216 host = mmc_priv(mmc); 3217 host->mmc = mmc; 3218 host->mmc_host_ops = sdhci_ops; 3219 mmc->ops = &host->mmc_host_ops; 3220 3221 host->flags = SDHCI_SIGNALING_330; 3222 3223 host->cqe_ier = SDHCI_CQE_INT_MASK; 3224 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK; 3225 3226 host->tuning_delay = -1; 3227 3228 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; 3229 3230 return host; 3231 } 3232 3233 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 3234 3235 static int sdhci_set_dma_mask(struct sdhci_host *host) 3236 { 3237 struct mmc_host *mmc = host->mmc; 3238 struct device *dev = mmc_dev(mmc); 3239 int ret = -EINVAL; 3240 3241 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 3242 host->flags &= ~SDHCI_USE_64_BIT_DMA; 3243 3244 /* Try 64-bit mask if hardware is capable of it */ 3245 if (host->flags & SDHCI_USE_64_BIT_DMA) { 3246 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 3247 if (ret) { 3248 pr_warn("%s: Failed to set 64-bit DMA mask.\n", 3249 mmc_hostname(mmc)); 3250 host->flags &= ~SDHCI_USE_64_BIT_DMA; 3251 } 3252 } 3253 3254 /* 32-bit mask as default & fallback */ 3255 if (ret) { 3256 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 3257 if (ret) 3258 pr_warn("%s: Failed to set 32-bit DMA mask.\n", 3259 mmc_hostname(mmc)); 3260 } 3261 3262 return ret; 3263 } 3264 3265 void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1) 3266 { 3267 u16 v; 3268 u64 dt_caps_mask = 0; 3269 u64 dt_caps = 0; 3270 3271 if (host->read_caps) 3272 return; 3273 3274 host->read_caps = true; 3275 3276 if (debug_quirks) 3277 host->quirks = debug_quirks; 3278 3279 if (debug_quirks2) 3280 host->quirks2 = debug_quirks2; 3281 3282 sdhci_do_reset(host, SDHCI_RESET_ALL); 3283 3284 of_property_read_u64(mmc_dev(host->mmc)->of_node, 3285 "sdhci-caps-mask", &dt_caps_mask); 3286 
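	/*
	 * Both properties are optional 64-bit values in the host's DT node:
	 * "sdhci-caps-mask" clears capability bits the hardware advertises
	 * incorrectly and "sdhci-caps" sets bits it fails to advertise.  The
	 * first cell is the upper 32 bits (Capabilities_1), the second the
	 * lower 32 bits (Capabilities).  Illustrative fragment only, the bit
	 * values are made up:
	 *
	 *	sdhci-caps-mask = <0x00000000 0x00200000>;
	 *	sdhci-caps      = <0x00000000 0x00000000>;
	 */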
of_property_read_u64(mmc_dev(host->mmc)->of_node, 3287 "sdhci-caps", &dt_caps); 3288 3289 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION); 3290 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; 3291 3292 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS) 3293 return; 3294 3295 if (caps) { 3296 host->caps = *caps; 3297 } else { 3298 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); 3299 host->caps &= ~lower_32_bits(dt_caps_mask); 3300 host->caps |= lower_32_bits(dt_caps); 3301 } 3302 3303 if (host->version < SDHCI_SPEC_300) 3304 return; 3305 3306 if (caps1) { 3307 host->caps1 = *caps1; 3308 } else { 3309 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); 3310 host->caps1 &= ~upper_32_bits(dt_caps_mask); 3311 host->caps1 |= upper_32_bits(dt_caps); 3312 } 3313 } 3314 EXPORT_SYMBOL_GPL(__sdhci_read_caps); 3315 3316 static int sdhci_allocate_bounce_buffer(struct sdhci_host *host) 3317 { 3318 struct mmc_host *mmc = host->mmc; 3319 unsigned int max_blocks; 3320 unsigned int bounce_size; 3321 int ret; 3322 3323 /* 3324 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer 3325 * has diminishing returns, this is probably because SD/MMC 3326 * cards are usually optimized to handle this size of requests. 3327 */ 3328 bounce_size = SZ_64K; 3329 /* 3330 * Adjust downwards to maximum request size if this is less 3331 * than our segment size, else hammer down the maximum 3332 * request size to the maximum buffer size. 3333 */ 3334 if (mmc->max_req_size < bounce_size) 3335 bounce_size = mmc->max_req_size; 3336 max_blocks = bounce_size / 512; 3337 3338 /* 3339 * When we just support one segment, we can get significant 3340 * speedups by the help of a bounce buffer to group scattered 3341 * reads/writes together. 3342 */ 3343 host->bounce_buffer = devm_kmalloc(mmc->parent, 3344 bounce_size, 3345 GFP_KERNEL); 3346 if (!host->bounce_buffer) { 3347 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", 3348 mmc_hostname(mmc), 3349 bounce_size); 3350 /* 3351 * Exiting with zero here makes sure we proceed with 3352 * mmc->max_segs == 1. 3353 */ 3354 return 0; 3355 } 3356 3357 host->bounce_addr = dma_map_single(mmc->parent, 3358 host->bounce_buffer, 3359 bounce_size, 3360 DMA_BIDIRECTIONAL); 3361 ret = dma_mapping_error(mmc->parent, host->bounce_addr); 3362 if (ret) 3363 /* Again fall back to max_segs == 1 */ 3364 return 0; 3365 host->bounce_buffer_size = bounce_size; 3366 3367 /* Lie about this since we're bouncing */ 3368 mmc->max_segs = max_blocks; 3369 mmc->max_seg_size = bounce_size; 3370 mmc->max_req_size = bounce_size; 3371 3372 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n", 3373 mmc_hostname(mmc), max_blocks, bounce_size); 3374 3375 return 0; 3376 } 3377 3378 int sdhci_setup_host(struct sdhci_host *host) 3379 { 3380 struct mmc_host *mmc; 3381 u32 max_current_caps; 3382 unsigned int ocr_avail; 3383 unsigned int override_timeout_clk; 3384 u32 max_clk; 3385 int ret; 3386 3387 WARN_ON(host == NULL); 3388 if (host == NULL) 3389 return -EINVAL; 3390 3391 mmc = host->mmc; 3392 3393 /* 3394 * If there are external regulators, get them. Note this must be done 3395 * early before resetting the host and reading the capabilities so that 3396 * the host can take the appropriate action if regulators are not 3397 * available. 
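 *
 * mmc_regulator_get_supply() looks the supplies up by the standard MMC
 * binding names.  Illustrative DT fragment only (the regulator labels
 * are made up):
 *
 *	vmmc-supply  = <&reg_sd_card>;
 *	vqmmc-supply = <&reg_sd_io>;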
3398 */ 3399 ret = mmc_regulator_get_supply(mmc); 3400 if (ret) 3401 return ret; 3402 3403 DBG("Version: 0x%08x | Present: 0x%08x\n", 3404 sdhci_readw(host, SDHCI_HOST_VERSION), 3405 sdhci_readl(host, SDHCI_PRESENT_STATE)); 3406 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", 3407 sdhci_readl(host, SDHCI_CAPABILITIES), 3408 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 3409 3410 sdhci_read_caps(host); 3411 3412 override_timeout_clk = host->timeout_clk; 3413 3414 if (host->version > SDHCI_SPEC_300) { 3415 pr_err("%s: Unknown controller version (%d). You may experience problems.\n", 3416 mmc_hostname(mmc), host->version); 3417 } 3418 3419 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 3420 host->flags |= SDHCI_USE_SDMA; 3421 else if (!(host->caps & SDHCI_CAN_DO_SDMA)) 3422 DBG("Controller doesn't have SDMA capability\n"); 3423 else 3424 host->flags |= SDHCI_USE_SDMA; 3425 3426 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 3427 (host->flags & SDHCI_USE_SDMA)) { 3428 DBG("Disabling DMA as it is marked broken\n"); 3429 host->flags &= ~SDHCI_USE_SDMA; 3430 } 3431 3432 if ((host->version >= SDHCI_SPEC_200) && 3433 (host->caps & SDHCI_CAN_DO_ADMA2)) 3434 host->flags |= SDHCI_USE_ADMA; 3435 3436 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 3437 (host->flags & SDHCI_USE_ADMA)) { 3438 DBG("Disabling ADMA as it is marked broken\n"); 3439 host->flags &= ~SDHCI_USE_ADMA; 3440 } 3441 3442 /* 3443 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask 3444 * and *must* do 64-bit DMA. A driver has the opportunity to change 3445 * that during the first call to ->enable_dma(). Similarly 3446 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to 3447 * implement. 3448 */ 3449 if (host->caps & SDHCI_CAN_64BIT) 3450 host->flags |= SDHCI_USE_64_BIT_DMA; 3451 3452 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3453 ret = sdhci_set_dma_mask(host); 3454 3455 if (!ret && host->ops->enable_dma) 3456 ret = host->ops->enable_dma(host); 3457 3458 if (ret) { 3459 pr_warn("%s: No suitable DMA available - falling back to PIO\n", 3460 mmc_hostname(mmc)); 3461 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 3462 3463 ret = 0; 3464 } 3465 } 3466 3467 /* SDMA does not support 64-bit DMA */ 3468 if (host->flags & SDHCI_USE_64_BIT_DMA) 3469 host->flags &= ~SDHCI_USE_SDMA; 3470 3471 if (host->flags & SDHCI_USE_ADMA) { 3472 dma_addr_t dma; 3473 void *buf; 3474 3475 /* 3476 * The DMA descriptor table size is calculated as the maximum 3477 * number of segments times 2, to allow for an alignment 3478 * descriptor for each segment, plus 1 for a nop end descriptor, 3479 * all multipled by the descriptor size. 
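 *
 * Worked example with illustrative numbers (assuming SDHCI_MAX_SEGS is
 * 128 and the 64-bit ADMA2 descriptor is 12 bytes, as defined in
 * sdhci.h at the time of writing):
 *
 *	adma_table_sz = (128 * 2 + 1) * 12 = 3084 bytes
 *
 * The alignment buffer allocated alongside it adds a further
 * SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN bytes.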
3480 */ 3481 if (host->flags & SDHCI_USE_64_BIT_DMA) { 3482 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * 3483 SDHCI_ADMA2_64_DESC_SZ; 3484 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ; 3485 } else { 3486 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * 3487 SDHCI_ADMA2_32_DESC_SZ; 3488 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; 3489 } 3490 3491 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; 3492 buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz + 3493 host->adma_table_sz, &dma, GFP_KERNEL); 3494 if (!buf) { 3495 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 3496 mmc_hostname(mmc)); 3497 host->flags &= ~SDHCI_USE_ADMA; 3498 } else if ((dma + host->align_buffer_sz) & 3499 (SDHCI_ADMA2_DESC_ALIGN - 1)) { 3500 pr_warn("%s: unable to allocate aligned ADMA descriptor\n", 3501 mmc_hostname(mmc)); 3502 host->flags &= ~SDHCI_USE_ADMA; 3503 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 3504 host->adma_table_sz, buf, dma); 3505 } else { 3506 host->align_buffer = buf; 3507 host->align_addr = dma; 3508 3509 host->adma_table = buf + host->align_buffer_sz; 3510 host->adma_addr = dma + host->align_buffer_sz; 3511 } 3512 } 3513 3514 /* 3515 * If we use DMA, then it's up to the caller to set the DMA 3516 * mask, but PIO does not need the hw shim so we set a new 3517 * mask here in that case. 3518 */ 3519 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { 3520 host->dma_mask = DMA_BIT_MASK(64); 3521 mmc_dev(mmc)->dma_mask = &host->dma_mask; 3522 } 3523 3524 if (host->version >= SDHCI_SPEC_300) 3525 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK) 3526 >> SDHCI_CLOCK_BASE_SHIFT; 3527 else 3528 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK) 3529 >> SDHCI_CLOCK_BASE_SHIFT; 3530 3531 host->max_clk *= 1000000; 3532 if (host->max_clk == 0 || host->quirks & 3533 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 3534 if (!host->ops->get_max_clock) { 3535 pr_err("%s: Hardware doesn't specify base clock frequency.\n", 3536 mmc_hostname(mmc)); 3537 ret = -ENODEV; 3538 goto undma; 3539 } 3540 host->max_clk = host->ops->get_max_clock(host); 3541 } 3542 3543 /* 3544 * In case of Host Controller v3.00, find out whether clock 3545 * multiplier is supported. 3546 */ 3547 host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >> 3548 SDHCI_CLOCK_MUL_SHIFT; 3549 3550 /* 3551 * In case the value in Clock Multiplier is 0, then programmable 3552 * clock mode is not supported, otherwise the actual clock 3553 * multiplier is one more than the value of Clock Multiplier 3554 * in the Capabilities Register. 3555 */ 3556 if (host->clk_mul) 3557 host->clk_mul += 1; 3558 3559 /* 3560 * Set host parameters. 
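 *
 * For example (illustrative base clock), a v3.00 host with a 200 MHz
 * base clock and no clock multiplier advertises
 *
 *	f_min = 200000000 / SDHCI_MAX_DIV_SPEC_300 (2046) ~= 98 kHz
 *
 * whereas a host with a clock multiplier uses programmable clock mode
 * and gets f_min = (max_clk * clk_mul) / 1024, as computed below.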
3561 */ 3562 max_clk = host->max_clk; 3563 3564 if (host->ops->get_min_clock) 3565 mmc->f_min = host->ops->get_min_clock(host); 3566 else if (host->version >= SDHCI_SPEC_300) { 3567 if (host->clk_mul) { 3568 mmc->f_min = (host->max_clk * host->clk_mul) / 1024; 3569 max_clk = host->max_clk * host->clk_mul; 3570 } else 3571 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 3572 } else 3573 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 3574 3575 if (!mmc->f_max || mmc->f_max > max_clk) 3576 mmc->f_max = max_clk; 3577 3578 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 3579 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >> 3580 SDHCI_TIMEOUT_CLK_SHIFT; 3581 3582 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) 3583 host->timeout_clk *= 1000; 3584 3585 if (host->timeout_clk == 0) { 3586 if (!host->ops->get_timeout_clock) { 3587 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n", 3588 mmc_hostname(mmc)); 3589 ret = -ENODEV; 3590 goto undma; 3591 } 3592 3593 host->timeout_clk = 3594 DIV_ROUND_UP(host->ops->get_timeout_clock(host), 3595 1000); 3596 } 3597 3598 if (override_timeout_clk) 3599 host->timeout_clk = override_timeout_clk; 3600 3601 mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 3602 host->ops->get_max_timeout_count(host) : 1 << 27; 3603 mmc->max_busy_timeout /= host->timeout_clk; 3604 } 3605 3606 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; 3607 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 3608 3609 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 3610 host->flags |= SDHCI_AUTO_CMD12; 3611 3612 /* Auto-CMD23 stuff only works in ADMA or PIO. */ 3613 if ((host->version >= SDHCI_SPEC_300) && 3614 ((host->flags & SDHCI_USE_ADMA) || 3615 !(host->flags & SDHCI_USE_SDMA)) && 3616 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { 3617 host->flags |= SDHCI_AUTO_CMD23; 3618 DBG("Auto-CMD23 available\n"); 3619 } else { 3620 DBG("Auto-CMD23 unavailable\n"); 3621 } 3622 3623 /* 3624 * A controller may support 8-bit width, but the board itself 3625 * might not have the pins brought out. Boards that support 3626 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 3627 * their platform code before calling sdhci_add_host(), and we 3628 * won't assume 8-bit width for hosts without that CAP. 3629 */ 3630 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 3631 mmc->caps |= MMC_CAP_4_BIT_DATA; 3632 3633 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) 3634 mmc->caps &= ~MMC_CAP_CMD23; 3635 3636 if (host->caps & SDHCI_CAN_DO_HISPD) 3637 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 3638 3639 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3640 mmc_card_is_removable(mmc) && 3641 mmc_gpio_get_cd(host->mmc) < 0) 3642 mmc->caps |= MMC_CAP_NEEDS_POLL; 3643 3644 /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */ 3645 if (!IS_ERR(mmc->supply.vqmmc)) { 3646 ret = regulator_enable(mmc->supply.vqmmc); 3647 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, 3648 1950000)) 3649 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | 3650 SDHCI_SUPPORT_SDR50 | 3651 SDHCI_SUPPORT_DDR50); 3652 if (ret) { 3653 pr_warn("%s: Failed to enable vqmmc regulator: %d\n", 3654 mmc_hostname(mmc), ret); 3655 mmc->supply.vqmmc = ERR_PTR(-EINVAL); 3656 } 3657 } 3658 3659 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) { 3660 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 3661 SDHCI_SUPPORT_DDR50); 3662 } 3663 3664 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. 
*/ 3665 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 3666 SDHCI_SUPPORT_DDR50)) 3667 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; 3668 3669 /* SDR104 supports also implies SDR50 support */ 3670 if (host->caps1 & SDHCI_SUPPORT_SDR104) { 3671 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; 3672 /* SD3.0: SDR104 is supported so (for eMMC) the caps2 3673 * field can be promoted to support HS200. 3674 */ 3675 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) 3676 mmc->caps2 |= MMC_CAP2_HS200; 3677 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) { 3678 mmc->caps |= MMC_CAP_UHS_SDR50; 3679 } 3680 3681 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 && 3682 (host->caps1 & SDHCI_SUPPORT_HS400)) 3683 mmc->caps2 |= MMC_CAP2_HS400; 3684 3685 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) && 3686 (IS_ERR(mmc->supply.vqmmc) || 3687 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000, 3688 1300000))) 3689 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V; 3690 3691 if ((host->caps1 & SDHCI_SUPPORT_DDR50) && 3692 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) 3693 mmc->caps |= MMC_CAP_UHS_DDR50; 3694 3695 /* Does the host need tuning for SDR50? */ 3696 if (host->caps1 & SDHCI_USE_SDR50_TUNING) 3697 host->flags |= SDHCI_SDR50_NEEDS_TUNING; 3698 3699 /* Driver Type(s) (A, C, D) supported by the host */ 3700 if (host->caps1 & SDHCI_DRIVER_TYPE_A) 3701 mmc->caps |= MMC_CAP_DRIVER_TYPE_A; 3702 if (host->caps1 & SDHCI_DRIVER_TYPE_C) 3703 mmc->caps |= MMC_CAP_DRIVER_TYPE_C; 3704 if (host->caps1 & SDHCI_DRIVER_TYPE_D) 3705 mmc->caps |= MMC_CAP_DRIVER_TYPE_D; 3706 3707 /* Initial value for re-tuning timer count */ 3708 host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >> 3709 SDHCI_RETUNING_TIMER_COUNT_SHIFT; 3710 3711 /* 3712 * In case Re-tuning Timer is not disabled, the actual value of 3713 * re-tuning timer will be 2 ^ (n - 1). 3714 */ 3715 if (host->tuning_count) 3716 host->tuning_count = 1 << (host->tuning_count - 1); 3717 3718 /* Re-tuning mode supported by the Host Controller */ 3719 host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >> 3720 SDHCI_RETUNING_MODE_SHIFT; 3721 3722 ocr_avail = 0; 3723 3724 /* 3725 * According to SD Host Controller spec v3.00, if the Host System 3726 * can afford more than 150mA, Host Driver should set XPC to 1. Also 3727 * the value is meaningful only if Voltage Support in the Capabilities 3728 * register is set. The actual current value is 4 times the register 3729 * value. 
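 *
 * Worked example of the fallback conversion done below when the
 * capability field is blank and a vmmc regulator is present (numbers
 * are illustrative): a regulator limit of 400000 uA becomes
 *
 *	curr = 400000 / 1000 = 400 mA
 *	curr = 400 / SDHCI_MAX_CURRENT_MULTIPLIER (4) = 100
 *
 * and the synthesised field value of 100 reads back as 100 * 4 = 400 mA
 * for each of the 3.3 V, 3.0 V and 1.8 V ranges.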
3730 */ 3731 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); 3732 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) { 3733 int curr = regulator_get_current_limit(mmc->supply.vmmc); 3734 if (curr > 0) { 3735 3736 /* convert to SDHCI_MAX_CURRENT format */ 3737 curr = curr/1000; /* convert to mA */ 3738 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; 3739 3740 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); 3741 max_current_caps = 3742 (curr << SDHCI_MAX_CURRENT_330_SHIFT) | 3743 (curr << SDHCI_MAX_CURRENT_300_SHIFT) | 3744 (curr << SDHCI_MAX_CURRENT_180_SHIFT); 3745 } 3746 } 3747 3748 if (host->caps & SDHCI_CAN_VDD_330) { 3749 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; 3750 3751 mmc->max_current_330 = ((max_current_caps & 3752 SDHCI_MAX_CURRENT_330_MASK) >> 3753 SDHCI_MAX_CURRENT_330_SHIFT) * 3754 SDHCI_MAX_CURRENT_MULTIPLIER; 3755 } 3756 if (host->caps & SDHCI_CAN_VDD_300) { 3757 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; 3758 3759 mmc->max_current_300 = ((max_current_caps & 3760 SDHCI_MAX_CURRENT_300_MASK) >> 3761 SDHCI_MAX_CURRENT_300_SHIFT) * 3762 SDHCI_MAX_CURRENT_MULTIPLIER; 3763 } 3764 if (host->caps & SDHCI_CAN_VDD_180) { 3765 ocr_avail |= MMC_VDD_165_195; 3766 3767 mmc->max_current_180 = ((max_current_caps & 3768 SDHCI_MAX_CURRENT_180_MASK) >> 3769 SDHCI_MAX_CURRENT_180_SHIFT) * 3770 SDHCI_MAX_CURRENT_MULTIPLIER; 3771 } 3772 3773 /* If OCR set by host, use it instead. */ 3774 if (host->ocr_mask) 3775 ocr_avail = host->ocr_mask; 3776 3777 /* If OCR set by external regulators, give it highest prio. */ 3778 if (mmc->ocr_avail) 3779 ocr_avail = mmc->ocr_avail; 3780 3781 mmc->ocr_avail = ocr_avail; 3782 mmc->ocr_avail_sdio = ocr_avail; 3783 if (host->ocr_avail_sdio) 3784 mmc->ocr_avail_sdio &= host->ocr_avail_sdio; 3785 mmc->ocr_avail_sd = ocr_avail; 3786 if (host->ocr_avail_sd) 3787 mmc->ocr_avail_sd &= host->ocr_avail_sd; 3788 else /* normal SD controllers don't support 1.8V */ 3789 mmc->ocr_avail_sd &= ~MMC_VDD_165_195; 3790 mmc->ocr_avail_mmc = ocr_avail; 3791 if (host->ocr_avail_mmc) 3792 mmc->ocr_avail_mmc &= host->ocr_avail_mmc; 3793 3794 if (mmc->ocr_avail == 0) { 3795 pr_err("%s: Hardware doesn't report any support voltages.\n", 3796 mmc_hostname(mmc)); 3797 ret = -ENODEV; 3798 goto unreg; 3799 } 3800 3801 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 3802 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | 3803 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) || 3804 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V))) 3805 host->flags |= SDHCI_SIGNALING_180; 3806 3807 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V) 3808 host->flags |= SDHCI_SIGNALING_120; 3809 3810 spin_lock_init(&host->lock); 3811 3812 /* 3813 * Maximum number of sectors in one transfer. Limited by SDMA boundary 3814 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this 3815 * is less anyway. 3816 */ 3817 mmc->max_req_size = 524288; 3818 3819 /* 3820 * Maximum number of segments. Depends on if the hardware 3821 * can do scatter/gather or not. 3822 */ 3823 if (host->flags & SDHCI_USE_ADMA) { 3824 mmc->max_segs = SDHCI_MAX_SEGS; 3825 } else if (host->flags & SDHCI_USE_SDMA) { 3826 mmc->max_segs = 1; 3827 if (swiotlb_max_segment()) { 3828 unsigned int max_req_size = (1 << IO_TLB_SHIFT) * 3829 IO_TLB_SEGSIZE; 3830 mmc->max_req_size = min(mmc->max_req_size, 3831 max_req_size); 3832 } 3833 } else { /* PIO */ 3834 mmc->max_segs = SDHCI_MAX_SEGS; 3835 } 3836 3837 /* 3838 * Maximum segment size. Could be one segment with the maximum number 3839 * of bytes. 
When doing hardware scatter/gather, each entry cannot 3840 * be larger than 64 KiB though. 3841 */ 3842 if (host->flags & SDHCI_USE_ADMA) { 3843 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) 3844 mmc->max_seg_size = 65535; 3845 else 3846 mmc->max_seg_size = 65536; 3847 } else { 3848 mmc->max_seg_size = mmc->max_req_size; 3849 } 3850 3851 /* 3852 * Maximum block size. This varies from controller to controller and 3853 * is specified in the capabilities register. 3854 */ 3855 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { 3856 mmc->max_blk_size = 2; 3857 } else { 3858 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >> 3859 SDHCI_MAX_BLOCK_SHIFT; 3860 if (mmc->max_blk_size >= 3) { 3861 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n", 3862 mmc_hostname(mmc)); 3863 mmc->max_blk_size = 0; 3864 } 3865 } 3866 3867 mmc->max_blk_size = 512 << mmc->max_blk_size; 3868 3869 /* 3870 * Maximum block count. 3871 */ 3872 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; 3873 3874 if (mmc->max_segs == 1) { 3875 /* This may alter mmc->*_blk_* parameters */ 3876 ret = sdhci_allocate_bounce_buffer(host); 3877 if (ret) 3878 return ret; 3879 } 3880 3881 return 0; 3882 3883 unreg: 3884 if (!IS_ERR(mmc->supply.vqmmc)) 3885 regulator_disable(mmc->supply.vqmmc); 3886 undma: 3887 if (host->align_buffer) 3888 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 3889 host->adma_table_sz, host->align_buffer, 3890 host->align_addr); 3891 host->adma_table = NULL; 3892 host->align_buffer = NULL; 3893 3894 return ret; 3895 } 3896 EXPORT_SYMBOL_GPL(sdhci_setup_host); 3897 3898 void sdhci_cleanup_host(struct sdhci_host *host) 3899 { 3900 struct mmc_host *mmc = host->mmc; 3901 3902 if (!IS_ERR(mmc->supply.vqmmc)) 3903 regulator_disable(mmc->supply.vqmmc); 3904 3905 if (host->align_buffer) 3906 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 3907 host->adma_table_sz, host->align_buffer, 3908 host->align_addr); 3909 host->adma_table = NULL; 3910 host->align_buffer = NULL; 3911 } 3912 EXPORT_SYMBOL_GPL(sdhci_cleanup_host); 3913 3914 int __sdhci_add_host(struct sdhci_host *host) 3915 { 3916 struct mmc_host *mmc = host->mmc; 3917 int ret; 3918 3919 /* 3920 * Init tasklets. 3921 */ 3922 tasklet_init(&host->finish_tasklet, 3923 sdhci_tasklet_finish, (unsigned long)host); 3924 3925 timer_setup(&host->timer, sdhci_timeout_timer, 0); 3926 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0); 3927 3928 init_waitqueue_head(&host->buf_ready_int); 3929 3930 sdhci_init(host, 0); 3931 3932 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, 3933 IRQF_SHARED, mmc_hostname(mmc), host); 3934 if (ret) { 3935 pr_err("%s: Failed to request IRQ %d: %d\n", 3936 mmc_hostname(mmc), host->irq, ret); 3937 goto untasklet; 3938 } 3939 3940 ret = sdhci_led_register(host); 3941 if (ret) { 3942 pr_err("%s: Failed to register LED device: %d\n", 3943 mmc_hostname(mmc), ret); 3944 goto unirq; 3945 } 3946 3947 mmiowb(); 3948 3949 ret = mmc_add_host(mmc); 3950 if (ret) 3951 goto unled; 3952 3953 pr_info("%s: SDHCI controller on %s [%s] using %s\n", 3954 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), 3955 (host->flags & SDHCI_USE_ADMA) ? 3956 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : 3957 (host->flags & SDHCI_USE_SDMA) ? 
"DMA" : "PIO"); 3958 3959 sdhci_enable_card_detection(host); 3960 3961 return 0; 3962 3963 unled: 3964 sdhci_led_unregister(host); 3965 unirq: 3966 sdhci_do_reset(host, SDHCI_RESET_ALL); 3967 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3968 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3969 free_irq(host->irq, host); 3970 untasklet: 3971 tasklet_kill(&host->finish_tasklet); 3972 3973 return ret; 3974 } 3975 EXPORT_SYMBOL_GPL(__sdhci_add_host); 3976 3977 int sdhci_add_host(struct sdhci_host *host) 3978 { 3979 int ret; 3980 3981 ret = sdhci_setup_host(host); 3982 if (ret) 3983 return ret; 3984 3985 ret = __sdhci_add_host(host); 3986 if (ret) 3987 goto cleanup; 3988 3989 return 0; 3990 3991 cleanup: 3992 sdhci_cleanup_host(host); 3993 3994 return ret; 3995 } 3996 EXPORT_SYMBOL_GPL(sdhci_add_host); 3997 3998 void sdhci_remove_host(struct sdhci_host *host, int dead) 3999 { 4000 struct mmc_host *mmc = host->mmc; 4001 unsigned long flags; 4002 4003 if (dead) { 4004 spin_lock_irqsave(&host->lock, flags); 4005 4006 host->flags |= SDHCI_DEVICE_DEAD; 4007 4008 if (sdhci_has_requests(host)) { 4009 pr_err("%s: Controller removed during " 4010 " transfer!\n", mmc_hostname(mmc)); 4011 sdhci_error_out_mrqs(host, -ENOMEDIUM); 4012 } 4013 4014 spin_unlock_irqrestore(&host->lock, flags); 4015 } 4016 4017 sdhci_disable_card_detection(host); 4018 4019 mmc_remove_host(mmc); 4020 4021 sdhci_led_unregister(host); 4022 4023 if (!dead) 4024 sdhci_do_reset(host, SDHCI_RESET_ALL); 4025 4026 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4027 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4028 free_irq(host->irq, host); 4029 4030 del_timer_sync(&host->timer); 4031 del_timer_sync(&host->data_timer); 4032 4033 tasklet_kill(&host->finish_tasklet); 4034 4035 if (!IS_ERR(mmc->supply.vqmmc)) 4036 regulator_disable(mmc->supply.vqmmc); 4037 4038 if (host->align_buffer) 4039 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4040 host->adma_table_sz, host->align_buffer, 4041 host->align_addr); 4042 4043 host->adma_table = NULL; 4044 host->align_buffer = NULL; 4045 } 4046 4047 EXPORT_SYMBOL_GPL(sdhci_remove_host); 4048 4049 void sdhci_free_host(struct sdhci_host *host) 4050 { 4051 mmc_free_host(host->mmc); 4052 } 4053 4054 EXPORT_SYMBOL_GPL(sdhci_free_host); 4055 4056 /*****************************************************************************\ 4057 * * 4058 * Driver init/exit * 4059 * * 4060 \*****************************************************************************/ 4061 4062 static int __init sdhci_drv_init(void) 4063 { 4064 pr_info(DRIVER_NAME 4065 ": Secure Digital Host Controller Interface driver\n"); 4066 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 4067 4068 return 0; 4069 } 4070 4071 static void __exit sdhci_drv_exit(void) 4072 { 4073 } 4074 4075 module_init(sdhci_drv_init); 4076 module_exit(sdhci_drv_exit); 4077 4078 module_param(debug_quirks, uint, 0444); 4079 module_param(debug_quirks2, uint, 0444); 4080 4081 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 4082 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 4083 MODULE_LICENSE("GPL"); 4084 4085 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 4086 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 4087