1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver 4 * 5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 6 * 7 * Thanks to the following companies for their support: 8 * 9 * - JMicron (hardware and technical support) 10 */ 11 12 #include <linux/bitfield.h> 13 #include <linux/delay.h> 14 #include <linux/dmaengine.h> 15 #include <linux/ktime.h> 16 #include <linux/highmem.h> 17 #include <linux/io.h> 18 #include <linux/module.h> 19 #include <linux/dma-mapping.h> 20 #include <linux/slab.h> 21 #include <linux/scatterlist.h> 22 #include <linux/sizes.h> 23 #include <linux/swiotlb.h> 24 #include <linux/regulator/consumer.h> 25 #include <linux/pm_runtime.h> 26 #include <linux/of.h> 27 28 #include <linux/leds.h> 29 30 #include <linux/mmc/mmc.h> 31 #include <linux/mmc/host.h> 32 #include <linux/mmc/card.h> 33 #include <linux/mmc/sdio.h> 34 #include <linux/mmc/slot-gpio.h> 35 36 #include "sdhci.h" 37 38 #define DRIVER_NAME "sdhci" 39 40 #define DBG(f, x...) \ 41 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 42 43 #define SDHCI_DUMP(f, x...) \ 44 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 45 46 #define MAX_TUNING_LOOP 40 47 48 static unsigned int debug_quirks = 0; 49 static unsigned int debug_quirks2; 50 51 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); 52 53 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd); 54 55 void sdhci_dumpregs(struct sdhci_host *host) 56 { 57 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n"); 58 59 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n", 60 sdhci_readl(host, SDHCI_DMA_ADDRESS), 61 sdhci_readw(host, SDHCI_HOST_VERSION)); 62 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n", 63 sdhci_readw(host, SDHCI_BLOCK_SIZE), 64 sdhci_readw(host, SDHCI_BLOCK_COUNT)); 65 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n", 66 sdhci_readl(host, SDHCI_ARGUMENT), 67 sdhci_readw(host, SDHCI_TRANSFER_MODE)); 68 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n", 69 sdhci_readl(host, SDHCI_PRESENT_STATE), 70 sdhci_readb(host, SDHCI_HOST_CONTROL)); 71 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n", 72 sdhci_readb(host, SDHCI_POWER_CONTROL), 73 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); 74 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n", 75 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), 76 sdhci_readw(host, SDHCI_CLOCK_CONTROL)); 77 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n", 78 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), 79 sdhci_readl(host, SDHCI_INT_STATUS)); 80 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n", 81 sdhci_readl(host, SDHCI_INT_ENABLE), 82 sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); 83 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n", 84 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS), 85 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 86 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n", 87 sdhci_readl(host, SDHCI_CAPABILITIES), 88 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 89 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n", 90 sdhci_readw(host, SDHCI_COMMAND), 91 sdhci_readl(host, SDHCI_MAX_CURRENT)); 92 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n", 93 sdhci_readl(host, SDHCI_RESPONSE), 94 sdhci_readl(host, SDHCI_RESPONSE + 4)); 95 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n", 96 sdhci_readl(host, SDHCI_RESPONSE + 8), 97 sdhci_readl(host, SDHCI_RESPONSE + 12)); 98 SDHCI_DUMP("Host ctl2: 0x%08x\n", 99 sdhci_readw(host, SDHCI_HOST_CONTROL2)); 100 101 if (host->flags & 
SDHCI_USE_ADMA) { 102 if (host->flags & SDHCI_USE_64_BIT_DMA) { 103 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n", 104 sdhci_readl(host, SDHCI_ADMA_ERROR), 105 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI), 106 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 107 } else { 108 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 109 sdhci_readl(host, SDHCI_ADMA_ERROR), 110 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 111 } 112 } 113 114 if (host->ops->dump_vendor_regs) 115 host->ops->dump_vendor_regs(host); 116 117 SDHCI_DUMP("============================================\n"); 118 } 119 EXPORT_SYMBOL_GPL(sdhci_dumpregs); 120 121 /*****************************************************************************\ 122 * * 123 * Low level functions * 124 * * 125 \*****************************************************************************/ 126 127 static void sdhci_do_enable_v4_mode(struct sdhci_host *host) 128 { 129 u16 ctrl2; 130 131 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 132 if (ctrl2 & SDHCI_CTRL_V4_MODE) 133 return; 134 135 ctrl2 |= SDHCI_CTRL_V4_MODE; 136 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 137 } 138 139 /* 140 * This can be called before sdhci_add_host() by Vendor's host controller 141 * driver to enable v4 mode if supported. 142 */ 143 void sdhci_enable_v4_mode(struct sdhci_host *host) 144 { 145 host->v4_mode = true; 146 sdhci_do_enable_v4_mode(host); 147 } 148 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode); 149 150 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd) 151 { 152 return cmd->data || cmd->flags & MMC_RSP_BUSY; 153 } 154 155 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) 156 { 157 u32 present; 158 159 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 160 !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc)) 161 return; 162 163 if (enable) { 164 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 165 SDHCI_CARD_PRESENT; 166 167 host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : 168 SDHCI_INT_CARD_INSERT; 169 } else { 170 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 171 } 172 173 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 174 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 175 } 176 177 static void sdhci_enable_card_detection(struct sdhci_host *host) 178 { 179 sdhci_set_card_detection(host, true); 180 } 181 182 static void sdhci_disable_card_detection(struct sdhci_host *host) 183 { 184 sdhci_set_card_detection(host, false); 185 } 186 187 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) 188 { 189 if (host->bus_on) 190 return; 191 host->bus_on = true; 192 pm_runtime_get_noresume(host->mmc->parent); 193 } 194 195 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) 196 { 197 if (!host->bus_on) 198 return; 199 host->bus_on = false; 200 pm_runtime_put_noidle(host->mmc->parent); 201 } 202 203 void sdhci_reset(struct sdhci_host *host, u8 mask) 204 { 205 ktime_t timeout; 206 207 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 208 209 if (mask & SDHCI_RESET_ALL) { 210 host->clock = 0; 211 /* Reset-all turns off SD Bus Power */ 212 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 213 sdhci_runtime_pm_bus_off(host); 214 } 215 216 /* Wait max 100 ms */ 217 timeout = ktime_add_ms(ktime_get(), 100); 218 219 /* hw clears the bit when it's done */ 220 while (1) { 221 bool timedout = ktime_after(ktime_get(), timeout); 222 223 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) 224 break; 225 if (timedout) { 226 pr_err("%s: Reset 0x%x never completed.\n", 227 mmc_hostname(host->mmc), (int)mask); 228 sdhci_dumpregs(host); 229 return; 230 } 231 udelay(10); 232 } 233 } 234 EXPORT_SYMBOL_GPL(sdhci_reset); 235 236 static void sdhci_do_reset(struct sdhci_host *host, u8 mask) 237 { 238 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 239 struct mmc_host *mmc = host->mmc; 240 241 if (!mmc->ops->get_cd(mmc)) 242 return; 243 } 244 245 host->ops->reset(host, mask); 246 247 if (mask & SDHCI_RESET_ALL) { 248 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 249 if (host->ops->enable_dma) 250 host->ops->enable_dma(host); 251 } 252 253 /* Resetting the controller clears many */ 254 host->preset_enabled = false; 255 } 256 } 257 258 static void sdhci_set_default_irqs(struct sdhci_host *host) 259 { 260 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 261 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | 262 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | 263 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | 264 SDHCI_INT_RESPONSE; 265 266 if (host->tuning_mode == SDHCI_TUNING_MODE_2 || 267 host->tuning_mode == SDHCI_TUNING_MODE_3) 268 host->ier |= SDHCI_INT_RETUNE; 269 270 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 271 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 272 } 273 274 static void sdhci_config_dma(struct sdhci_host *host) 275 { 276 u8 ctrl; 277 u16 ctrl2; 278 279 if (host->version < SDHCI_SPEC_200) 280 return; 281 282 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 283 284 /* 285 * Always adjust the DMA selection as some controllers 286 * (e.g. JMicron) can't do PIO properly when the selection 287 * is ADMA. 
288 */ 289 ctrl &= ~SDHCI_CTRL_DMA_MASK; 290 if (!(host->flags & SDHCI_REQ_USE_DMA)) 291 goto out; 292 293 /* Note if DMA Select is zero then SDMA is selected */ 294 if (host->flags & SDHCI_USE_ADMA) 295 ctrl |= SDHCI_CTRL_ADMA32; 296 297 if (host->flags & SDHCI_USE_64_BIT_DMA) { 298 /* 299 * If v4 mode, all supported DMA can be 64-bit addressing if 300 * controller supports 64-bit system address, otherwise only 301 * ADMA can support 64-bit addressing. 302 */ 303 if (host->v4_mode) { 304 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 305 ctrl2 |= SDHCI_CTRL_64BIT_ADDR; 306 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 307 } else if (host->flags & SDHCI_USE_ADMA) { 308 /* 309 * Don't need to undo SDHCI_CTRL_ADMA32 in order to 310 * set SDHCI_CTRL_ADMA64. 311 */ 312 ctrl |= SDHCI_CTRL_ADMA64; 313 } 314 } 315 316 out: 317 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 318 } 319 320 static void sdhci_init(struct sdhci_host *host, int soft) 321 { 322 struct mmc_host *mmc = host->mmc; 323 unsigned long flags; 324 325 if (soft) 326 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 327 else 328 sdhci_do_reset(host, SDHCI_RESET_ALL); 329 330 if (host->v4_mode) 331 sdhci_do_enable_v4_mode(host); 332 333 spin_lock_irqsave(&host->lock, flags); 334 sdhci_set_default_irqs(host); 335 spin_unlock_irqrestore(&host->lock, flags); 336 337 host->cqe_on = false; 338 339 if (soft) { 340 /* force clock reconfiguration */ 341 host->clock = 0; 342 mmc->ops->set_ios(mmc, &mmc->ios); 343 } 344 } 345 346 static void sdhci_reinit(struct sdhci_host *host) 347 { 348 u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 349 350 sdhci_init(host, 0); 351 sdhci_enable_card_detection(host); 352 353 /* 354 * A change to the card detect bits indicates a change in present state, 355 * refer sdhci_set_card_detection(). A card detect interrupt might have 356 * been missed while the host controller was being reset, so trigger a 357 * rescan to check. 
358 */ 359 if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT))) 360 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 361 } 362 363 static void __sdhci_led_activate(struct sdhci_host *host) 364 { 365 u8 ctrl; 366 367 if (host->quirks & SDHCI_QUIRK_NO_LED) 368 return; 369 370 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 371 ctrl |= SDHCI_CTRL_LED; 372 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 373 } 374 375 static void __sdhci_led_deactivate(struct sdhci_host *host) 376 { 377 u8 ctrl; 378 379 if (host->quirks & SDHCI_QUIRK_NO_LED) 380 return; 381 382 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 383 ctrl &= ~SDHCI_CTRL_LED; 384 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 385 } 386 387 #if IS_REACHABLE(CONFIG_LEDS_CLASS) 388 static void sdhci_led_control(struct led_classdev *led, 389 enum led_brightness brightness) 390 { 391 struct sdhci_host *host = container_of(led, struct sdhci_host, led); 392 unsigned long flags; 393 394 spin_lock_irqsave(&host->lock, flags); 395 396 if (host->runtime_suspended) 397 goto out; 398 399 if (brightness == LED_OFF) 400 __sdhci_led_deactivate(host); 401 else 402 __sdhci_led_activate(host); 403 out: 404 spin_unlock_irqrestore(&host->lock, flags); 405 } 406 407 static int sdhci_led_register(struct sdhci_host *host) 408 { 409 struct mmc_host *mmc = host->mmc; 410 411 if (host->quirks & SDHCI_QUIRK_NO_LED) 412 return 0; 413 414 snprintf(host->led_name, sizeof(host->led_name), 415 "%s::", mmc_hostname(mmc)); 416 417 host->led.name = host->led_name; 418 host->led.brightness = LED_OFF; 419 host->led.default_trigger = mmc_hostname(mmc); 420 host->led.brightness_set = sdhci_led_control; 421 422 return led_classdev_register(mmc_dev(mmc), &host->led); 423 } 424 425 static void sdhci_led_unregister(struct sdhci_host *host) 426 { 427 if (host->quirks & SDHCI_QUIRK_NO_LED) 428 return; 429 430 led_classdev_unregister(&host->led); 431 } 432 433 static inline void sdhci_led_activate(struct sdhci_host *host) 434 { 435 } 436 437 static inline void sdhci_led_deactivate(struct sdhci_host *host) 438 { 439 } 440 441 #else 442 443 static inline int sdhci_led_register(struct sdhci_host *host) 444 { 445 return 0; 446 } 447 448 static inline void sdhci_led_unregister(struct sdhci_host *host) 449 { 450 } 451 452 static inline void sdhci_led_activate(struct sdhci_host *host) 453 { 454 __sdhci_led_activate(host); 455 } 456 457 static inline void sdhci_led_deactivate(struct sdhci_host *host) 458 { 459 __sdhci_led_deactivate(host); 460 } 461 462 #endif 463 464 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq, 465 unsigned long timeout) 466 { 467 if (sdhci_data_line_cmd(mrq->cmd)) 468 mod_timer(&host->data_timer, timeout); 469 else 470 mod_timer(&host->timer, timeout); 471 } 472 473 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq) 474 { 475 if (sdhci_data_line_cmd(mrq->cmd)) 476 del_timer(&host->data_timer); 477 else 478 del_timer(&host->timer); 479 } 480 481 static inline bool sdhci_has_requests(struct sdhci_host *host) 482 { 483 return host->cmd || host->data_cmd; 484 } 485 486 /*****************************************************************************\ 487 * * 488 * Core functions * 489 * * 490 \*****************************************************************************/ 491 492 static void sdhci_read_block_pio(struct sdhci_host *host) 493 { 494 unsigned long flags; 495 size_t blksize, len, chunk; 496 u32 scratch; 497 u8 *buf; 498 499 DBG("PIO reading\n"); 500 501 blksize = host->data->blksz; 502 
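	/*
	 * The loop below pulls one 32-bit word at a time from the buffer
	 * data port into 'scratch' and copies it out a byte at a time;
	 * 'chunk' counts how many bytes of the current word are still
	 * unconsumed, and sg_miter advances across scatterlist segments.
	 */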
chunk = 0; 503 504 local_irq_save(flags); 505 506 while (blksize) { 507 BUG_ON(!sg_miter_next(&host->sg_miter)); 508 509 len = min(host->sg_miter.length, blksize); 510 511 blksize -= len; 512 host->sg_miter.consumed = len; 513 514 buf = host->sg_miter.addr; 515 516 while (len) { 517 if (chunk == 0) { 518 scratch = sdhci_readl(host, SDHCI_BUFFER); 519 chunk = 4; 520 } 521 522 *buf = scratch & 0xFF; 523 524 buf++; 525 scratch >>= 8; 526 chunk--; 527 len--; 528 } 529 } 530 531 sg_miter_stop(&host->sg_miter); 532 533 local_irq_restore(flags); 534 } 535 536 static void sdhci_write_block_pio(struct sdhci_host *host) 537 { 538 unsigned long flags; 539 size_t blksize, len, chunk; 540 u32 scratch; 541 u8 *buf; 542 543 DBG("PIO writing\n"); 544 545 blksize = host->data->blksz; 546 chunk = 0; 547 scratch = 0; 548 549 local_irq_save(flags); 550 551 while (blksize) { 552 BUG_ON(!sg_miter_next(&host->sg_miter)); 553 554 len = min(host->sg_miter.length, blksize); 555 556 blksize -= len; 557 host->sg_miter.consumed = len; 558 559 buf = host->sg_miter.addr; 560 561 while (len) { 562 scratch |= (u32)*buf << (chunk * 8); 563 564 buf++; 565 chunk++; 566 len--; 567 568 if ((chunk == 4) || ((len == 0) && (blksize == 0))) { 569 sdhci_writel(host, scratch, SDHCI_BUFFER); 570 chunk = 0; 571 scratch = 0; 572 } 573 } 574 } 575 576 sg_miter_stop(&host->sg_miter); 577 578 local_irq_restore(flags); 579 } 580 581 static void sdhci_transfer_pio(struct sdhci_host *host) 582 { 583 u32 mask; 584 585 if (host->blocks == 0) 586 return; 587 588 if (host->data->flags & MMC_DATA_READ) 589 mask = SDHCI_DATA_AVAILABLE; 590 else 591 mask = SDHCI_SPACE_AVAILABLE; 592 593 /* 594 * Some controllers (JMicron JMB38x) mess up the buffer bits 595 * for transfers < 4 bytes. As long as it is just one block, 596 * we can ignore the bits. 597 */ 598 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && 599 (host->data->blocks == 1)) 600 mask = ~0; 601 602 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 603 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) 604 udelay(100); 605 606 if (host->data->flags & MMC_DATA_READ) 607 sdhci_read_block_pio(host); 608 else 609 sdhci_write_block_pio(host); 610 611 host->blocks--; 612 if (host->blocks == 0) 613 break; 614 } 615 616 DBG("PIO transfer complete.\n"); 617 } 618 619 static int sdhci_pre_dma_transfer(struct sdhci_host *host, 620 struct mmc_data *data, int cookie) 621 { 622 int sg_count; 623 624 /* 625 * If the data buffers are already mapped, return the previous 626 * dma_map_sg() result. 
627 */ 628 if (data->host_cookie == COOKIE_PRE_MAPPED) 629 return data->sg_count; 630 631 /* Bounce write requests to the bounce buffer */ 632 if (host->bounce_buffer) { 633 unsigned int length = data->blksz * data->blocks; 634 635 if (length > host->bounce_buffer_size) { 636 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n", 637 mmc_hostname(host->mmc), length, 638 host->bounce_buffer_size); 639 return -EIO; 640 } 641 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { 642 /* Copy the data to the bounce buffer */ 643 if (host->ops->copy_to_bounce_buffer) { 644 host->ops->copy_to_bounce_buffer(host, 645 data, length); 646 } else { 647 sg_copy_to_buffer(data->sg, data->sg_len, 648 host->bounce_buffer, length); 649 } 650 } 651 /* Switch ownership to the DMA */ 652 dma_sync_single_for_device(host->mmc->parent, 653 host->bounce_addr, 654 host->bounce_buffer_size, 655 mmc_get_dma_dir(data)); 656 /* Just a dummy value */ 657 sg_count = 1; 658 } else { 659 /* Just access the data directly from memory */ 660 sg_count = dma_map_sg(mmc_dev(host->mmc), 661 data->sg, data->sg_len, 662 mmc_get_dma_dir(data)); 663 } 664 665 if (sg_count == 0) 666 return -ENOSPC; 667 668 data->sg_count = sg_count; 669 data->host_cookie = cookie; 670 671 return sg_count; 672 } 673 674 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) 675 { 676 local_irq_save(*flags); 677 return kmap_atomic(sg_page(sg)) + sg->offset; 678 } 679 680 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) 681 { 682 kunmap_atomic(buffer); 683 local_irq_restore(*flags); 684 } 685 686 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, 687 dma_addr_t addr, int len, unsigned int cmd) 688 { 689 struct sdhci_adma2_64_desc *dma_desc = *desc; 690 691 /* 32-bit and 64-bit descriptors have these members in same position */ 692 dma_desc->cmd = cpu_to_le16(cmd); 693 dma_desc->len = cpu_to_le16(len); 694 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr)); 695 696 if (host->flags & SDHCI_USE_64_BIT_DMA) 697 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr)); 698 699 *desc += host->desc_sz; 700 } 701 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc); 702 703 static inline void __sdhci_adma_write_desc(struct sdhci_host *host, 704 void **desc, dma_addr_t addr, 705 int len, unsigned int cmd) 706 { 707 if (host->ops->adma_write_desc) 708 host->ops->adma_write_desc(host, desc, addr, len, cmd); 709 else 710 sdhci_adma_write_desc(host, desc, addr, len, cmd); 711 } 712 713 static void sdhci_adma_mark_end(void *desc) 714 { 715 struct sdhci_adma2_64_desc *dma_desc = desc; 716 717 /* 32-bit and 64-bit descriptors have 'cmd' in same position */ 718 dma_desc->cmd |= cpu_to_le16(ADMA2_END); 719 } 720 721 static void sdhci_adma_table_pre(struct sdhci_host *host, 722 struct mmc_data *data, int sg_count) 723 { 724 struct scatterlist *sg; 725 unsigned long flags; 726 dma_addr_t addr, align_addr; 727 void *desc, *align; 728 char *buffer; 729 int len, offset, i; 730 731 /* 732 * The spec does not specify endianness of descriptor table. 733 * We currently guess that it is LE. 734 */ 735 736 host->sg_count = sg_count; 737 738 desc = host->adma_table; 739 align = host->align_buffer; 740 741 align_addr = host->align_addr; 742 743 for_each_sg(data->sg, sg, host->sg_count, i) { 744 addr = sg_dma_address(sg); 745 len = sg_dma_len(sg); 746 747 /* 748 * The SDHCI specification states that ADMA addresses must 749 * be 32-bit aligned. 
If they aren't, then we use a bounce 750 * buffer for the (up to three) bytes that screw up the 751 * alignment. 752 */ 753 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) & 754 SDHCI_ADMA2_MASK; 755 if (offset) { 756 if (data->flags & MMC_DATA_WRITE) { 757 buffer = sdhci_kmap_atomic(sg, &flags); 758 memcpy(align, buffer, offset); 759 sdhci_kunmap_atomic(buffer, &flags); 760 } 761 762 /* tran, valid */ 763 __sdhci_adma_write_desc(host, &desc, align_addr, 764 offset, ADMA2_TRAN_VALID); 765 766 BUG_ON(offset > 65536); 767 768 align += SDHCI_ADMA2_ALIGN; 769 align_addr += SDHCI_ADMA2_ALIGN; 770 771 addr += offset; 772 len -= offset; 773 } 774 775 BUG_ON(len > 65536); 776 777 /* tran, valid */ 778 if (len) 779 __sdhci_adma_write_desc(host, &desc, addr, len, 780 ADMA2_TRAN_VALID); 781 782 /* 783 * If this triggers then we have a calculation bug 784 * somewhere. :/ 785 */ 786 WARN_ON((desc - host->adma_table) >= host->adma_table_sz); 787 } 788 789 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { 790 /* Mark the last descriptor as the terminating descriptor */ 791 if (desc != host->adma_table) { 792 desc -= host->desc_sz; 793 sdhci_adma_mark_end(desc); 794 } 795 } else { 796 /* Add a terminating entry - nop, end, valid */ 797 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID); 798 } 799 } 800 801 static void sdhci_adma_table_post(struct sdhci_host *host, 802 struct mmc_data *data) 803 { 804 struct scatterlist *sg; 805 int i, size; 806 void *align; 807 char *buffer; 808 unsigned long flags; 809 810 if (data->flags & MMC_DATA_READ) { 811 bool has_unaligned = false; 812 813 /* Do a quick scan of the SG list for any unaligned mappings */ 814 for_each_sg(data->sg, sg, host->sg_count, i) 815 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 816 has_unaligned = true; 817 break; 818 } 819 820 if (has_unaligned) { 821 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, 822 data->sg_len, DMA_FROM_DEVICE); 823 824 align = host->align_buffer; 825 826 for_each_sg(data->sg, sg, host->sg_count, i) { 827 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 828 size = SDHCI_ADMA2_ALIGN - 829 (sg_dma_address(sg) & SDHCI_ADMA2_MASK); 830 831 buffer = sdhci_kmap_atomic(sg, &flags); 832 memcpy(buffer, align, size); 833 sdhci_kunmap_atomic(buffer, &flags); 834 835 align += SDHCI_ADMA2_ALIGN; 836 } 837 } 838 } 839 } 840 } 841 842 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr) 843 { 844 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS); 845 if (host->flags & SDHCI_USE_64_BIT_DMA) 846 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI); 847 } 848 849 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host) 850 { 851 if (host->bounce_buffer) 852 return host->bounce_addr; 853 else 854 return sg_dma_address(host->data->sg); 855 } 856 857 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr) 858 { 859 if (host->v4_mode) 860 sdhci_set_adma_addr(host, addr); 861 else 862 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS); 863 } 864 865 static unsigned int sdhci_target_timeout(struct sdhci_host *host, 866 struct mmc_command *cmd, 867 struct mmc_data *data) 868 { 869 unsigned int target_timeout; 870 871 /* timeout in us */ 872 if (!data) { 873 target_timeout = cmd->busy_timeout * 1000; 874 } else { 875 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000); 876 if (host->clock && data->timeout_clks) { 877 unsigned long long val; 878 879 /* 880 * data->timeout_clks is in units of clock cycles. 881 * host->clock is in Hz. target_timeout is in us. 
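			 * For example, 1000 timeout_clks at a 50 MHz host->clock
			 * work out to 1000000 * 1000 / 50000000 = 20 us.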
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = host->mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data;
	unsigned target_timeout, current_timeout;

	*too_big = true;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified command, assume max */
	if (cmd == NULL)
		return 0xE;

	data = cmd->data;
	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
960 * The first step is the minimum timeout, which will have a 961 * minimum resolution of 6 bits: 962 * (1) 2^13*1000 > 2^22, 963 * (2) host->timeout_clk < 2^16 964 * => 965 * (1) / (2) > 2^6 966 */ 967 count = 0; 968 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 969 while (current_timeout < target_timeout) { 970 count++; 971 current_timeout <<= 1; 972 if (count >= 0xF) 973 break; 974 } 975 976 if (count >= 0xF) { 977 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT)) 978 DBG("Too large timeout 0x%x requested for CMD%d!\n", 979 count, cmd->opcode); 980 count = 0xE; 981 } else { 982 *too_big = false; 983 } 984 985 return count; 986 } 987 988 static void sdhci_set_transfer_irqs(struct sdhci_host *host) 989 { 990 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; 991 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; 992 993 if (host->flags & SDHCI_REQ_USE_DMA) 994 host->ier = (host->ier & ~pio_irqs) | dma_irqs; 995 else 996 host->ier = (host->ier & ~dma_irqs) | pio_irqs; 997 998 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12)) 999 host->ier |= SDHCI_INT_AUTO_CMD_ERR; 1000 else 1001 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR; 1002 1003 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1004 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1005 } 1006 1007 void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) 1008 { 1009 if (enable) 1010 host->ier |= SDHCI_INT_DATA_TIMEOUT; 1011 else 1012 host->ier &= ~SDHCI_INT_DATA_TIMEOUT; 1013 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1014 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1015 } 1016 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); 1017 1018 void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1019 { 1020 bool too_big = false; 1021 u8 count = sdhci_calc_timeout(host, cmd, &too_big); 1022 1023 if (too_big && 1024 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { 1025 sdhci_calc_sw_timeout(host, cmd); 1026 sdhci_set_data_timeout_irq(host, false); 1027 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { 1028 sdhci_set_data_timeout_irq(host, true); 1029 } 1030 1031 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); 1032 } 1033 EXPORT_SYMBOL_GPL(__sdhci_set_timeout); 1034 1035 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1036 { 1037 if (host->ops->set_timeout) 1038 host->ops->set_timeout(host, cmd); 1039 else 1040 __sdhci_set_timeout(host, cmd); 1041 } 1042 1043 static void sdhci_initialize_data(struct sdhci_host *host, 1044 struct mmc_data *data) 1045 { 1046 WARN_ON(host->data); 1047 1048 /* Sanity checks */ 1049 BUG_ON(data->blksz * data->blocks > 524288); 1050 BUG_ON(data->blksz > host->mmc->max_blk_size); 1051 BUG_ON(data->blocks > 65535); 1052 1053 host->data = data; 1054 host->data_early = 0; 1055 host->data->bytes_xfered = 0; 1056 } 1057 1058 static inline void sdhci_set_block_info(struct sdhci_host *host, 1059 struct mmc_data *data) 1060 { 1061 /* Set the DMA boundary value and block size */ 1062 sdhci_writew(host, 1063 SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), 1064 SDHCI_BLOCK_SIZE); 1065 /* 1066 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count 1067 * can be supported, in that case 16-bit block count register must be 0. 
1068 */ 1069 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1070 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { 1071 if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) 1072 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); 1073 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); 1074 } else { 1075 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); 1076 } 1077 } 1078 1079 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) 1080 { 1081 struct mmc_data *data = cmd->data; 1082 1083 sdhci_initialize_data(host, data); 1084 1085 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 1086 struct scatterlist *sg; 1087 unsigned int length_mask, offset_mask; 1088 int i; 1089 1090 host->flags |= SDHCI_REQ_USE_DMA; 1091 1092 /* 1093 * FIXME: This doesn't account for merging when mapping the 1094 * scatterlist. 1095 * 1096 * The assumption here being that alignment and lengths are 1097 * the same after DMA mapping to device address space. 1098 */ 1099 length_mask = 0; 1100 offset_mask = 0; 1101 if (host->flags & SDHCI_USE_ADMA) { 1102 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) { 1103 length_mask = 3; 1104 /* 1105 * As we use up to 3 byte chunks to work 1106 * around alignment problems, we need to 1107 * check the offset as well. 1108 */ 1109 offset_mask = 3; 1110 } 1111 } else { 1112 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) 1113 length_mask = 3; 1114 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) 1115 offset_mask = 3; 1116 } 1117 1118 if (unlikely(length_mask | offset_mask)) { 1119 for_each_sg(data->sg, sg, data->sg_len, i) { 1120 if (sg->length & length_mask) { 1121 DBG("Reverting to PIO because of transfer size (%d)\n", 1122 sg->length); 1123 host->flags &= ~SDHCI_REQ_USE_DMA; 1124 break; 1125 } 1126 if (sg->offset & offset_mask) { 1127 DBG("Reverting to PIO because of bad alignment\n"); 1128 host->flags &= ~SDHCI_REQ_USE_DMA; 1129 break; 1130 } 1131 } 1132 } 1133 } 1134 1135 if (host->flags & SDHCI_REQ_USE_DMA) { 1136 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1137 1138 if (sg_cnt <= 0) { 1139 /* 1140 * This only happens when someone fed 1141 * us an invalid request. 
1142 */ 1143 WARN_ON(1); 1144 host->flags &= ~SDHCI_REQ_USE_DMA; 1145 } else if (host->flags & SDHCI_USE_ADMA) { 1146 sdhci_adma_table_pre(host, data, sg_cnt); 1147 sdhci_set_adma_addr(host, host->adma_addr); 1148 } else { 1149 WARN_ON(sg_cnt != 1); 1150 sdhci_set_sdma_addr(host, sdhci_sdma_address(host)); 1151 } 1152 } 1153 1154 sdhci_config_dma(host); 1155 1156 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 1157 int flags; 1158 1159 flags = SG_MITER_ATOMIC; 1160 if (host->data->flags & MMC_DATA_READ) 1161 flags |= SG_MITER_TO_SG; 1162 else 1163 flags |= SG_MITER_FROM_SG; 1164 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1165 host->blocks = data->blocks; 1166 } 1167 1168 sdhci_set_transfer_irqs(host); 1169 1170 sdhci_set_block_info(host, data); 1171 } 1172 1173 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) 1174 1175 static int sdhci_external_dma_init(struct sdhci_host *host) 1176 { 1177 int ret = 0; 1178 struct mmc_host *mmc = host->mmc; 1179 1180 host->tx_chan = dma_request_chan(mmc->parent, "tx"); 1181 if (IS_ERR(host->tx_chan)) { 1182 ret = PTR_ERR(host->tx_chan); 1183 if (ret != -EPROBE_DEFER) 1184 pr_warn("Failed to request TX DMA channel.\n"); 1185 host->tx_chan = NULL; 1186 return ret; 1187 } 1188 1189 host->rx_chan = dma_request_chan(mmc->parent, "rx"); 1190 if (IS_ERR(host->rx_chan)) { 1191 if (host->tx_chan) { 1192 dma_release_channel(host->tx_chan); 1193 host->tx_chan = NULL; 1194 } 1195 1196 ret = PTR_ERR(host->rx_chan); 1197 if (ret != -EPROBE_DEFER) 1198 pr_warn("Failed to request RX DMA channel.\n"); 1199 host->rx_chan = NULL; 1200 } 1201 1202 return ret; 1203 } 1204 1205 static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1206 struct mmc_data *data) 1207 { 1208 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; 1209 } 1210 1211 static int sdhci_external_dma_setup(struct sdhci_host *host, 1212 struct mmc_command *cmd) 1213 { 1214 int ret, i; 1215 enum dma_transfer_direction dir; 1216 struct dma_async_tx_descriptor *desc; 1217 struct mmc_data *data = cmd->data; 1218 struct dma_chan *chan; 1219 struct dma_slave_config cfg; 1220 dma_cookie_t cookie; 1221 int sg_cnt; 1222 1223 if (!host->mapbase) 1224 return -EINVAL; 1225 1226 cfg.src_addr = host->mapbase + SDHCI_BUFFER; 1227 cfg.dst_addr = host->mapbase + SDHCI_BUFFER; 1228 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1229 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1230 cfg.src_maxburst = data->blksz / 4; 1231 cfg.dst_maxburst = data->blksz / 4; 1232 1233 /* Sanity check: all the SG entries must be aligned by block size. */ 1234 for (i = 0; i < data->sg_len; i++) { 1235 if ((data->sg + i)->length % data->blksz) 1236 return -EINVAL; 1237 } 1238 1239 chan = sdhci_external_dma_channel(host, data); 1240 1241 ret = dmaengine_slave_config(chan, &cfg); 1242 if (ret) 1243 return ret; 1244 1245 sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1246 if (sg_cnt <= 0) 1247 return -EINVAL; 1248 1249 dir = data->flags & MMC_DATA_WRITE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; 1250 desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir, 1251 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1252 if (!desc) 1253 return -EINVAL; 1254 1255 desc->callback = NULL; 1256 desc->callback_param = NULL; 1257 1258 cookie = dmaengine_submit(desc); 1259 if (dma_submit_error(cookie)) 1260 ret = cookie; 1261 1262 return ret; 1263 } 1264 1265 static void sdhci_external_dma_release(struct sdhci_host *host) 1266 { 1267 if (host->tx_chan) { 1268 dma_release_channel(host->tx_chan); 1269 host->tx_chan = NULL; 1270 } 1271 1272 if (host->rx_chan) { 1273 dma_release_channel(host->rx_chan); 1274 host->rx_chan = NULL; 1275 } 1276 1277 sdhci_switch_external_dma(host, false); 1278 } 1279 1280 static void __sdhci_external_dma_prepare_data(struct sdhci_host *host, 1281 struct mmc_command *cmd) 1282 { 1283 struct mmc_data *data = cmd->data; 1284 1285 sdhci_initialize_data(host, data); 1286 1287 host->flags |= SDHCI_REQ_USE_DMA; 1288 sdhci_set_transfer_irqs(host); 1289 1290 sdhci_set_block_info(host, data); 1291 } 1292 1293 static void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1294 struct mmc_command *cmd) 1295 { 1296 if (!sdhci_external_dma_setup(host, cmd)) { 1297 __sdhci_external_dma_prepare_data(host, cmd); 1298 } else { 1299 sdhci_external_dma_release(host); 1300 pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n", 1301 mmc_hostname(host->mmc)); 1302 sdhci_prepare_data(host, cmd); 1303 } 1304 } 1305 1306 static void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1307 struct mmc_command *cmd) 1308 { 1309 struct dma_chan *chan; 1310 1311 if (!cmd->data) 1312 return; 1313 1314 chan = sdhci_external_dma_channel(host, cmd->data); 1315 if (chan) 1316 dma_async_issue_pending(chan); 1317 } 1318 1319 #else 1320 1321 static inline int sdhci_external_dma_init(struct sdhci_host *host) 1322 { 1323 return -EOPNOTSUPP; 1324 } 1325 1326 static inline void sdhci_external_dma_release(struct sdhci_host *host) 1327 { 1328 } 1329 1330 static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1331 struct mmc_command *cmd) 1332 { 1333 /* This should never happen */ 1334 WARN_ON_ONCE(1); 1335 } 1336 1337 static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1338 struct mmc_command *cmd) 1339 { 1340 } 1341 1342 static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1343 struct mmc_data *data) 1344 { 1345 return NULL; 1346 } 1347 1348 #endif 1349 1350 void sdhci_switch_external_dma(struct sdhci_host *host, bool en) 1351 { 1352 host->use_external_dma = en; 1353 } 1354 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma); 1355 1356 static inline bool sdhci_auto_cmd12(struct sdhci_host *host, 1357 struct mmc_request *mrq) 1358 { 1359 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) && 1360 !mrq->cap_cmd_during_tfr; 1361 } 1362 1363 static inline bool sdhci_auto_cmd23(struct sdhci_host *host, 1364 struct mmc_request *mrq) 1365 { 1366 return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23); 1367 } 1368 1369 static inline bool sdhci_manual_cmd23(struct sdhci_host *host, 1370 struct mmc_request *mrq) 1371 { 1372 return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23); 1373 } 1374 1375 static inline void sdhci_auto_cmd_select(struct sdhci_host *host, 1376 struct mmc_command *cmd, 1377 u16 *mode) 1378 { 1379 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) && 1380 (cmd->opcode != SD_IO_RW_EXTENDED); 1381 bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq); 1382 u16 ctrl2; 
1383 1384 /* 1385 * In case of Version 4.10 or later, use of 'Auto CMD Auto 1386 * Select' is recommended rather than use of 'Auto CMD12 1387 * Enable' or 'Auto CMD23 Enable'. 1388 */ 1389 if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) { 1390 *mode |= SDHCI_TRNS_AUTO_SEL; 1391 1392 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1393 if (use_cmd23) 1394 ctrl2 |= SDHCI_CMD23_ENABLE; 1395 else 1396 ctrl2 &= ~SDHCI_CMD23_ENABLE; 1397 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 1398 1399 return; 1400 } 1401 1402 /* 1403 * If we are sending CMD23, CMD12 never gets sent 1404 * on successful completion (so no Auto-CMD12). 1405 */ 1406 if (use_cmd12) 1407 *mode |= SDHCI_TRNS_AUTO_CMD12; 1408 else if (use_cmd23) 1409 *mode |= SDHCI_TRNS_AUTO_CMD23; 1410 } 1411 1412 static void sdhci_set_transfer_mode(struct sdhci_host *host, 1413 struct mmc_command *cmd) 1414 { 1415 u16 mode = 0; 1416 struct mmc_data *data = cmd->data; 1417 1418 if (data == NULL) { 1419 if (host->quirks2 & 1420 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) { 1421 /* must not clear SDHCI_TRANSFER_MODE when tuning */ 1422 if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) 1423 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE); 1424 } else { 1425 /* clear Auto CMD settings for no data CMDs */ 1426 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); 1427 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 | 1428 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE); 1429 } 1430 return; 1431 } 1432 1433 WARN_ON(!host->data); 1434 1435 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE)) 1436 mode = SDHCI_TRNS_BLK_CNT_EN; 1437 1438 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) { 1439 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI; 1440 sdhci_auto_cmd_select(host, cmd, &mode); 1441 if (sdhci_auto_cmd23(host, cmd->mrq)) 1442 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2); 1443 } 1444 1445 if (data->flags & MMC_DATA_READ) 1446 mode |= SDHCI_TRNS_READ; 1447 if (host->flags & SDHCI_REQ_USE_DMA) 1448 mode |= SDHCI_TRNS_DMA; 1449 1450 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); 1451 } 1452 1453 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) 1454 { 1455 return (!(host->flags & SDHCI_DEVICE_DEAD) && 1456 ((mrq->cmd && mrq->cmd->error) || 1457 (mrq->sbc && mrq->sbc->error) || 1458 (mrq->data && mrq->data->stop && mrq->data->stop->error) || 1459 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); 1460 } 1461 1462 static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq) 1463 { 1464 int i; 1465 1466 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1467 if (host->mrqs_done[i] == mrq) { 1468 WARN_ON(1); 1469 return; 1470 } 1471 } 1472 1473 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1474 if (!host->mrqs_done[i]) { 1475 host->mrqs_done[i] = mrq; 1476 break; 1477 } 1478 } 1479 1480 WARN_ON(i >= SDHCI_MAX_MRQS); 1481 } 1482 1483 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1484 { 1485 if (host->cmd && host->cmd->mrq == mrq) 1486 host->cmd = NULL; 1487 1488 if (host->data_cmd && host->data_cmd->mrq == mrq) 1489 host->data_cmd = NULL; 1490 1491 if (host->deferred_cmd && host->deferred_cmd->mrq == mrq) 1492 host->deferred_cmd = NULL; 1493 1494 if (host->data && host->data->mrq == mrq) 1495 host->data = NULL; 1496 1497 if (sdhci_needs_reset(host, mrq)) 1498 host->pending_reset = true; 1499 1500 sdhci_set_mrq_done(host, mrq); 1501 1502 sdhci_del_timer(host, mrq); 1503 1504 if (!sdhci_has_requests(host)) 1505 sdhci_led_deactivate(host); 1506 } 1507 1508 static void 
sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1509 { 1510 __sdhci_finish_mrq(host, mrq); 1511 1512 queue_work(host->complete_wq, &host->complete_work); 1513 } 1514 1515 static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout) 1516 { 1517 struct mmc_command *data_cmd = host->data_cmd; 1518 struct mmc_data *data = host->data; 1519 1520 host->data = NULL; 1521 host->data_cmd = NULL; 1522 1523 /* 1524 * The controller needs a reset of internal state machines upon error 1525 * conditions. 1526 */ 1527 if (data->error) { 1528 if (!host->cmd || host->cmd == data_cmd) 1529 sdhci_do_reset(host, SDHCI_RESET_CMD); 1530 sdhci_do_reset(host, SDHCI_RESET_DATA); 1531 } 1532 1533 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) == 1534 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) 1535 sdhci_adma_table_post(host, data); 1536 1537 /* 1538 * The specification states that the block count register must 1539 * be updated, but it does not specify at what point in the 1540 * data flow. That makes the register entirely useless to read 1541 * back so we have to assume that nothing made it to the card 1542 * in the event of an error. 1543 */ 1544 if (data->error) 1545 data->bytes_xfered = 0; 1546 else 1547 data->bytes_xfered = data->blksz * data->blocks; 1548 1549 /* 1550 * Need to send CMD12 if - 1551 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23) 1552 * b) error in multiblock transfer 1553 */ 1554 if (data->stop && 1555 ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) || 1556 data->error)) { 1557 /* 1558 * 'cap_cmd_during_tfr' request must not use the command line 1559 * after mmc_command_done() has been called. It is upper layer's 1560 * responsibility to send the stop command if required. 1561 */ 1562 if (data->mrq->cap_cmd_during_tfr) { 1563 __sdhci_finish_mrq(host, data->mrq); 1564 } else { 1565 /* Avoid triggering warning in sdhci_send_command() */ 1566 host->cmd = NULL; 1567 if (!sdhci_send_command(host, data->stop)) { 1568 if (sw_data_timeout) { 1569 /* 1570 * This is anyway a sw data timeout, so 1571 * give up now. 
					 */
					data->stop->error = -EIO;
					__sdhci_finish_mrq(host, data->mrq);
				} else {
					WARN_ON(host->deferred_cmd);
					host->deferred_cmd = data->stop;
				}
			}
		}
	} else {
		__sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	__sdhci_finish_data(host, false);
}

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
		return false;

	host->cmd = cmd;
	host->data_timeout = 0;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
		sdhci_set_timeout(host, cmd);
	}

	if (cmd->data) {
		if (host->use_external_dma)
			sdhci_external_dma_prepare_data(host, cmd);
		else
			sdhci_prepare_data(host, cmd);
	}

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		WARN_ONCE(1, "Unsupported response type!\n");
		/*
		 * This does not happen in practice because 136-bit response
		 * commands never have busy waiting, so rather than complicate
		 * the error path, just remove busy waiting and continue.
		 */
		cmd->flags &= ~MMC_RSP_BUSY;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	if (host->use_external_dma)
		sdhci_external_dma_pre_transfer(host, cmd);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);

	return true;
}

static bool sdhci_present_error(struct sdhci_host *host,
				struct mmc_command *cmd, bool present)
{
	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		cmd->error = -ENOMEDIUM;
		return true;
	}

	return false;
}

static bool sdhci_send_command_retry(struct sdhci_host *host,
				     struct mmc_command *cmd,
				     unsigned long flags)
	__releases(host->lock)
	__acquires(host->lock)
{
	struct mmc_command *deferred_cmd = host->deferred_cmd;
	int timeout = 10; /* Approx. 10 ms */
	bool present;

	while (!sdhci_send_command(host, cmd)) {
		if (!timeout--) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			return false;
		}

		spin_unlock_irqrestore(&host->lock, flags);

		usleep_range(1000, 1250);

		present = host->mmc->ops->get_cd(host->mmc);

		spin_lock_irqsave(&host->lock, flags);

		/* A deferred command might disappear, handle that */
		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
			return true;

		if (sdhci_present_error(host, cmd, present))
			return false;
	}

	if (cmd == host->deferred_cmd)
		host->deferred_cmd = NULL;

	return true;
}

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an
interrupt when the busy state has 1774 * ended, allowing us to wait without wasting CPU cycles. 1775 * The busy signal uses DAT0 so this is similar to waiting 1776 * for data to complete. 1777 * 1778 * Note: The 1.0 specification is a bit ambiguous about this 1779 * feature so there might be some problems with older 1780 * controllers. 1781 */ 1782 if (cmd->flags & MMC_RSP_BUSY) { 1783 if (cmd->data) { 1784 DBG("Cannot wait for busy signal when also doing a data transfer"); 1785 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) && 1786 cmd == host->data_cmd) { 1787 /* Command complete before busy is ended */ 1788 return; 1789 } 1790 } 1791 1792 /* Finished CMD23, now send actual command. */ 1793 if (cmd == cmd->mrq->sbc) { 1794 if (!sdhci_send_command(host, cmd->mrq->cmd)) { 1795 WARN_ON(host->deferred_cmd); 1796 host->deferred_cmd = cmd->mrq->cmd; 1797 } 1798 } else { 1799 1800 /* Processed actual command. */ 1801 if (host->data && host->data_early) 1802 sdhci_finish_data(host); 1803 1804 if (!cmd->data) 1805 __sdhci_finish_mrq(host, cmd->mrq); 1806 } 1807 } 1808 1809 static u16 sdhci_get_preset_value(struct sdhci_host *host) 1810 { 1811 u16 preset = 0; 1812 1813 switch (host->timing) { 1814 case MMC_TIMING_UHS_SDR12: 1815 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1816 break; 1817 case MMC_TIMING_UHS_SDR25: 1818 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); 1819 break; 1820 case MMC_TIMING_UHS_SDR50: 1821 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); 1822 break; 1823 case MMC_TIMING_UHS_SDR104: 1824 case MMC_TIMING_MMC_HS200: 1825 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); 1826 break; 1827 case MMC_TIMING_UHS_DDR50: 1828 case MMC_TIMING_MMC_DDR52: 1829 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); 1830 break; 1831 case MMC_TIMING_MMC_HS400: 1832 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400); 1833 break; 1834 default: 1835 pr_warn("%s: Invalid UHS-I mode selected\n", 1836 mmc_hostname(host->mmc)); 1837 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1838 break; 1839 } 1840 return preset; 1841 } 1842 1843 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock, 1844 unsigned int *actual_clock) 1845 { 1846 int div = 0; /* Initialized for compiler warning */ 1847 int real_div = div, clk_mul = 1; 1848 u16 clk = 0; 1849 bool switch_base_clk = false; 1850 1851 if (host->version >= SDHCI_SPEC_300) { 1852 if (host->preset_enabled) { 1853 u16 pre_val; 1854 1855 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1856 pre_val = sdhci_get_preset_value(host); 1857 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val); 1858 if (host->clk_mul && 1859 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) { 1860 clk = SDHCI_PROG_CLOCK_MODE; 1861 real_div = div + 1; 1862 clk_mul = host->clk_mul; 1863 } else { 1864 real_div = max_t(int, 1, div << 1); 1865 } 1866 goto clock_set; 1867 } 1868 1869 /* 1870 * Check if the Host Controller supports Programmable Clock 1871 * Mode. 1872 */ 1873 if (host->clk_mul) { 1874 for (div = 1; div <= 1024; div++) { 1875 if ((host->max_clk * host->clk_mul / div) 1876 <= clock) 1877 break; 1878 } 1879 if ((host->max_clk * host->clk_mul / div) <= clock) { 1880 /* 1881 * Set Programmable Clock Mode in the Clock 1882 * Control register. 1883 */ 1884 clk = SDHCI_PROG_CLOCK_MODE; 1885 real_div = div; 1886 clk_mul = host->clk_mul; 1887 div--; 1888 } else { 1889 /* 1890 * Divisor can be too small to reach clock 1891 * speed requirement. Then use the base clock. 
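				 * For example, with a 100 MHz max_clk and a
				 * clk_mul of 10 even div = 1024 only gets down
				 * to about 977 kHz, so a 400 kHz request has
				 * to fall back to dividing the base clock.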
1892 */ 1893 switch_base_clk = true; 1894 } 1895 } 1896 1897 if (!host->clk_mul || switch_base_clk) { 1898 /* Version 3.00 divisors must be a multiple of 2. */ 1899 if (host->max_clk <= clock) 1900 div = 1; 1901 else { 1902 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; 1903 div += 2) { 1904 if ((host->max_clk / div) <= clock) 1905 break; 1906 } 1907 } 1908 real_div = div; 1909 div >>= 1; 1910 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN) 1911 && !div && host->max_clk <= 25000000) 1912 div = 1; 1913 } 1914 } else { 1915 /* Version 2.00 divisors must be a power of 2. */ 1916 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) { 1917 if ((host->max_clk / div) <= clock) 1918 break; 1919 } 1920 real_div = div; 1921 div >>= 1; 1922 } 1923 1924 clock_set: 1925 if (real_div) 1926 *actual_clock = (host->max_clk * clk_mul) / real_div; 1927 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; 1928 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) 1929 << SDHCI_DIVIDER_HI_SHIFT; 1930 1931 return clk; 1932 } 1933 EXPORT_SYMBOL_GPL(sdhci_calc_clk); 1934 1935 void sdhci_enable_clk(struct sdhci_host *host, u16 clk) 1936 { 1937 ktime_t timeout; 1938 1939 clk |= SDHCI_CLOCK_INT_EN; 1940 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1941 1942 /* Wait max 150 ms */ 1943 timeout = ktime_add_ms(ktime_get(), 150); 1944 while (1) { 1945 bool timedout = ktime_after(ktime_get(), timeout); 1946 1947 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1948 if (clk & SDHCI_CLOCK_INT_STABLE) 1949 break; 1950 if (timedout) { 1951 pr_err("%s: Internal clock never stabilised.\n", 1952 mmc_hostname(host->mmc)); 1953 sdhci_dumpregs(host); 1954 return; 1955 } 1956 udelay(10); 1957 } 1958 1959 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) { 1960 clk |= SDHCI_CLOCK_PLL_EN; 1961 clk &= ~SDHCI_CLOCK_INT_STABLE; 1962 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1963 1964 /* Wait max 150 ms */ 1965 timeout = ktime_add_ms(ktime_get(), 150); 1966 while (1) { 1967 bool timedout = ktime_after(ktime_get(), timeout); 1968 1969 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1970 if (clk & SDHCI_CLOCK_INT_STABLE) 1971 break; 1972 if (timedout) { 1973 pr_err("%s: PLL clock never stabilised.\n", 1974 mmc_hostname(host->mmc)); 1975 sdhci_dumpregs(host); 1976 return; 1977 } 1978 udelay(10); 1979 } 1980 } 1981 1982 clk |= SDHCI_CLOCK_CARD_EN; 1983 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1984 } 1985 EXPORT_SYMBOL_GPL(sdhci_enable_clk); 1986 1987 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 1988 { 1989 u16 clk; 1990 1991 host->mmc->actual_clock = 0; 1992 1993 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 1994 1995 if (clock == 0) 1996 return; 1997 1998 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); 1999 sdhci_enable_clk(host, clk); 2000 } 2001 EXPORT_SYMBOL_GPL(sdhci_set_clock); 2002 2003 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode, 2004 unsigned short vdd) 2005 { 2006 struct mmc_host *mmc = host->mmc; 2007 2008 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2009 2010 if (mode != MMC_POWER_OFF) 2011 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); 2012 else 2013 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2014 } 2015 2016 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, 2017 unsigned short vdd) 2018 { 2019 u8 pwr = 0; 2020 2021 if (mode != MMC_POWER_OFF) { 2022 switch (1 << vdd) { 2023 case MMC_VDD_165_195: 2024 /* 2025 * Without a regulator, SDHCI does not support 2.0v 2026 * so we only get here if the driver 
deliberately
		 * added the 2.0v range to ocr_avail. Map it to 1.8v
		 * for the purpose of turning on the power.
		 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on the power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10ms delay before they
		 * can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*
 * Some controllers need to configure a valid bus voltage on their power
 * register regardless of whether an external regulator is taking care of power
 * supply. This helper function takes care of it if set as the controller's
 * sdhci_ops.set_power callback.
 */
void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
				     unsigned char mode,
				     unsigned short vdd)
{
	if (!IS_ERR(host->mmc->supply.vmmc)) {
		struct mmc_host *mmc = host->mmc;

		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
	}
	sdhci_set_power_noreg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);

/*****************************************************************************\
 *                                                                           *
 *                              MMC callbacks                                *
 *                                                                           *
\*****************************************************************************/

void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_command *cmd;
	unsigned long flags;
	bool present;

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	if (sdhci_present_error(host, mrq->cmd, present))
		goto out_finish;

	cmd = sdhci_manual_cmd23(host, mrq) ?
mrq->sbc : mrq->cmd; 2143 2144 if (!sdhci_send_command_retry(host, cmd, flags)) 2145 goto out_finish; 2146 2147 spin_unlock_irqrestore(&host->lock, flags); 2148 2149 return; 2150 2151 out_finish: 2152 sdhci_finish_mrq(host, mrq); 2153 spin_unlock_irqrestore(&host->lock, flags); 2154 } 2155 EXPORT_SYMBOL_GPL(sdhci_request); 2156 2157 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq) 2158 { 2159 struct sdhci_host *host = mmc_priv(mmc); 2160 struct mmc_command *cmd; 2161 unsigned long flags; 2162 int ret = 0; 2163 2164 spin_lock_irqsave(&host->lock, flags); 2165 2166 if (sdhci_present_error(host, mrq->cmd, true)) { 2167 sdhci_finish_mrq(host, mrq); 2168 goto out_finish; 2169 } 2170 2171 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2172 2173 /* 2174 * The HSQ may send a command in interrupt context without polling 2175 * the busy signaling, which means we should return BUSY if the 2176 * controller has not released the inhibit bits, so that HSQ can try 2177 * sending the request again in non-atomic context. So we should not 2178 * finish this request here. 2179 */ 2180 if (!sdhci_send_command(host, cmd)) 2181 ret = -EBUSY; 2182 else 2183 sdhci_led_activate(host); 2184 2185 out_finish: 2186 spin_unlock_irqrestore(&host->lock, flags); 2187 return ret; 2188 } 2189 EXPORT_SYMBOL_GPL(sdhci_request_atomic); 2190 2191 void sdhci_set_bus_width(struct sdhci_host *host, int width) 2192 { 2193 u8 ctrl; 2194 2195 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2196 if (width == MMC_BUS_WIDTH_8) { 2197 ctrl &= ~SDHCI_CTRL_4BITBUS; 2198 ctrl |= SDHCI_CTRL_8BITBUS; 2199 } else { 2200 if (host->mmc->caps & MMC_CAP_8_BIT_DATA) 2201 ctrl &= ~SDHCI_CTRL_8BITBUS; 2202 if (width == MMC_BUS_WIDTH_4) 2203 ctrl |= SDHCI_CTRL_4BITBUS; 2204 else 2205 ctrl &= ~SDHCI_CTRL_4BITBUS; 2206 } 2207 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2208 } 2209 EXPORT_SYMBOL_GPL(sdhci_set_bus_width); 2210 2211 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 2212 { 2213 u16 ctrl_2; 2214 2215 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2216 /* Select Bus Speed Mode for host */ 2217 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 2218 if ((timing == MMC_TIMING_MMC_HS200) || 2219 (timing == MMC_TIMING_UHS_SDR104)) 2220 ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 2221 else if (timing == MMC_TIMING_UHS_SDR12) 2222 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 2223 else if (timing == MMC_TIMING_UHS_SDR25) 2224 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 2225 else if (timing == MMC_TIMING_UHS_SDR50) 2226 ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 2227 else if ((timing == MMC_TIMING_UHS_DDR50) || 2228 (timing == MMC_TIMING_MMC_DDR52)) 2229 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 2230 else if (timing == MMC_TIMING_MMC_HS400) 2231 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ 2232 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2233 } 2234 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 2235 2236 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 2237 { 2238 struct sdhci_host *host = mmc_priv(mmc); 2239 u8 ctrl; 2240 2241 if (ios->power_mode == MMC_POWER_UNDEFINED) 2242 return; 2243 2244 if (host->flags & SDHCI_DEVICE_DEAD) { 2245 if (!IS_ERR(mmc->supply.vmmc) && 2246 ios->power_mode == MMC_POWER_OFF) 2247 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 2248 return; 2249 } 2250 2251 /* 2252 * Reset the chip on each power off. 2253 * Should clear out any weird states.
2254 */ 2255 if (ios->power_mode == MMC_POWER_OFF) { 2256 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 2257 sdhci_reinit(host); 2258 } 2259 2260 if (host->version >= SDHCI_SPEC_300 && 2261 (ios->power_mode == MMC_POWER_UP) && 2262 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) 2263 sdhci_enable_preset_value(host, false); 2264 2265 if (!ios->clock || ios->clock != host->clock) { 2266 host->ops->set_clock(host, ios->clock); 2267 host->clock = ios->clock; 2268 2269 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && 2270 host->clock) { 2271 host->timeout_clk = host->mmc->actual_clock ? 2272 host->mmc->actual_clock / 1000 : 2273 host->clock / 1000; 2274 host->mmc->max_busy_timeout = 2275 host->ops->get_max_timeout_count ? 2276 host->ops->get_max_timeout_count(host) : 2277 1 << 27; 2278 host->mmc->max_busy_timeout /= host->timeout_clk; 2279 } 2280 } 2281 2282 if (host->ops->set_power) 2283 host->ops->set_power(host, ios->power_mode, ios->vdd); 2284 else 2285 sdhci_set_power(host, ios->power_mode, ios->vdd); 2286 2287 if (host->ops->platform_send_init_74_clocks) 2288 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 2289 2290 host->ops->set_bus_width(host, ios->bus_width); 2291 2292 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2293 2294 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) { 2295 if (ios->timing == MMC_TIMING_SD_HS || 2296 ios->timing == MMC_TIMING_MMC_HS || 2297 ios->timing == MMC_TIMING_MMC_HS400 || 2298 ios->timing == MMC_TIMING_MMC_HS200 || 2299 ios->timing == MMC_TIMING_MMC_DDR52 || 2300 ios->timing == MMC_TIMING_UHS_SDR50 || 2301 ios->timing == MMC_TIMING_UHS_SDR104 || 2302 ios->timing == MMC_TIMING_UHS_DDR50 || 2303 ios->timing == MMC_TIMING_UHS_SDR25) 2304 ctrl |= SDHCI_CTRL_HISPD; 2305 else 2306 ctrl &= ~SDHCI_CTRL_HISPD; 2307 } 2308 2309 if (host->version >= SDHCI_SPEC_300) { 2310 u16 clk, ctrl_2; 2311 2312 if (!host->preset_enabled) { 2313 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2314 /* 2315 * We only need to set Driver Strength if the 2316 * preset value enable is not set. 2317 */ 2318 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2319 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; 2320 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) 2321 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; 2322 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B) 2323 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2324 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) 2325 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; 2326 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D) 2327 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D; 2328 else { 2329 pr_warn("%s: invalid driver type, default to driver type B\n", 2330 mmc_hostname(mmc)); 2331 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2332 } 2333 2334 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2335 } else { 2336 /* 2337 * According to SDHC Spec v3.00, if the Preset Value 2338 * Enable in the Host Control 2 register is set, we 2339 * need to reset SD Clock Enable before changing High 2340 * Speed Enable to avoid generating clock glitches.
2341 */ 2342 2343 /* Reset SD Clock Enable */ 2344 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2345 clk &= ~SDHCI_CLOCK_CARD_EN; 2346 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2347 2348 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2349 2350 /* Re-enable SD Clock */ 2351 host->ops->set_clock(host, host->clock); 2352 } 2353 2354 /* Reset SD Clock Enable */ 2355 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2356 clk &= ~SDHCI_CLOCK_CARD_EN; 2357 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2358 2359 host->ops->set_uhs_signaling(host, ios->timing); 2360 host->timing = ios->timing; 2361 2362 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && 2363 ((ios->timing == MMC_TIMING_UHS_SDR12) || 2364 (ios->timing == MMC_TIMING_UHS_SDR25) || 2365 (ios->timing == MMC_TIMING_UHS_SDR50) || 2366 (ios->timing == MMC_TIMING_UHS_SDR104) || 2367 (ios->timing == MMC_TIMING_UHS_DDR50) || 2368 (ios->timing == MMC_TIMING_MMC_DDR52))) { 2369 u16 preset; 2370 2371 sdhci_enable_preset_value(host, true); 2372 preset = sdhci_get_preset_value(host); 2373 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK, 2374 preset); 2375 } 2376 2377 /* Re-enable SD Clock */ 2378 host->ops->set_clock(host, host->clock); 2379 } else 2380 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2381 2382 /* 2383 * Some (ENE) controllers go apeshit on some ios operation, 2384 * signalling timeout and CRC errors even on CMD0. Resetting 2385 * it on each ios seems to solve the problem. 2386 */ 2387 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) 2388 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 2389 } 2390 EXPORT_SYMBOL_GPL(sdhci_set_ios); 2391 2392 static int sdhci_get_cd(struct mmc_host *mmc) 2393 { 2394 struct sdhci_host *host = mmc_priv(mmc); 2395 int gpio_cd = mmc_gpio_get_cd(mmc); 2396 2397 if (host->flags & SDHCI_DEVICE_DEAD) 2398 return 0; 2399 2400 /* If nonremovable, assume that the card is always present. */ 2401 if (!mmc_card_is_removable(host->mmc)) 2402 return 1; 2403 2404 /* 2405 * Try slot gpio detect; if defined, it takes precedence 2406 * over the built-in controller functionality. 2407 */ 2408 if (gpio_cd >= 0) 2409 return !!gpio_cd; 2410 2411 /* If polling, assume that the card is always present. */ 2412 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2413 return 1; 2414 2415 /* Host native card detect */ 2416 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2417 } 2418 2419 static int sdhci_check_ro(struct sdhci_host *host) 2420 { 2421 unsigned long flags; 2422 int is_readonly; 2423 2424 spin_lock_irqsave(&host->lock, flags); 2425 2426 if (host->flags & SDHCI_DEVICE_DEAD) 2427 is_readonly = 0; 2428 else if (host->ops->get_ro) 2429 is_readonly = host->ops->get_ro(host); 2430 else if (mmc_can_gpio_ro(host->mmc)) 2431 is_readonly = mmc_gpio_get_ro(host->mmc); 2432 else 2433 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) 2434 & SDHCI_WRITE_PROTECT); 2435 2436 spin_unlock_irqrestore(&host->lock, flags); 2437 2438 /* This quirk needs to be replaced by a callback function later */ 2439 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2440 !is_readonly : is_readonly; 2441 } 2442 2443 #define SAMPLE_COUNT 5 2444 2445 static int sdhci_get_ro(struct mmc_host *mmc) 2446 { 2447 struct sdhci_host *host = mmc_priv(mmc); 2448 int i, ro_count; 2449 2450 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) 2451 return sdhci_check_ro(host); 2452 2453 ro_count = 0; 2454 for (i = 0; i < SAMPLE_COUNT; i++) { 2455 if (sdhci_check_ro(host)) { 2456 if (++ro_count > SAMPLE_COUNT / 2) 2457 return 1; 2458 } 2459 msleep(30); 2460 } 2461 return 0; 2462 } 2463 2464 static void sdhci_hw_reset(struct mmc_host *mmc) 2465 { 2466 struct sdhci_host *host = mmc_priv(mmc); 2467 2468 if (host->ops && host->ops->hw_reset) 2469 host->ops->hw_reset(host); 2470 } 2471 2472 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 2473 { 2474 if (!(host->flags & SDHCI_DEVICE_DEAD)) { 2475 if (enable) 2476 host->ier |= SDHCI_INT_CARD_INT; 2477 else 2478 host->ier &= ~SDHCI_INT_CARD_INT; 2479 2480 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2481 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2482 } 2483 } 2484 2485 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 2486 { 2487 struct sdhci_host *host = mmc_priv(mmc); 2488 unsigned long flags; 2489 2490 if (enable) 2491 pm_runtime_get_noresume(host->mmc->parent); 2492 2493 spin_lock_irqsave(&host->lock, flags); 2494 sdhci_enable_sdio_irq_nolock(host, enable); 2495 spin_unlock_irqrestore(&host->lock, flags); 2496 2497 if (!enable) 2498 pm_runtime_put_noidle(host->mmc->parent); 2499 } 2500 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2501 2502 static void sdhci_ack_sdio_irq(struct mmc_host *mmc) 2503 { 2504 struct sdhci_host *host = mmc_priv(mmc); 2505 unsigned long flags; 2506 2507 spin_lock_irqsave(&host->lock, flags); 2508 sdhci_enable_sdio_irq_nolock(host, true); 2509 spin_unlock_irqrestore(&host->lock, flags); 2510 } 2511 2512 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 2513 struct mmc_ios *ios) 2514 { 2515 struct sdhci_host *host = mmc_priv(mmc); 2516 u16 ctrl; 2517 int ret; 2518 2519 /* 2520 * Signal Voltage Switching is only applicable for Host Controllers 2521 * v3.00 and above. 
2522 */ 2523 if (host->version < SDHCI_SPEC_300) 2524 return 0; 2525 2526 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2527 2528 switch (ios->signal_voltage) { 2529 case MMC_SIGNAL_VOLTAGE_330: 2530 if (!(host->flags & SDHCI_SIGNALING_330)) 2531 return -EINVAL; 2532 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 2533 ctrl &= ~SDHCI_CTRL_VDD_180; 2534 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2535 2536 if (!IS_ERR(mmc->supply.vqmmc)) { 2537 ret = mmc_regulator_set_vqmmc(mmc, ios); 2538 if (ret < 0) { 2539 pr_warn("%s: Switching to 3.3V signalling voltage failed\n", 2540 mmc_hostname(mmc)); 2541 return -EIO; 2542 } 2543 } 2544 /* Wait for 5ms */ 2545 usleep_range(5000, 5500); 2546 2547 /* 3.3V regulator output should be stable within 5 ms */ 2548 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2549 if (!(ctrl & SDHCI_CTRL_VDD_180)) 2550 return 0; 2551 2552 pr_warn("%s: 3.3V regulator output did not become stable\n", 2553 mmc_hostname(mmc)); 2554 2555 return -EAGAIN; 2556 case MMC_SIGNAL_VOLTAGE_180: 2557 if (!(host->flags & SDHCI_SIGNALING_180)) 2558 return -EINVAL; 2559 if (!IS_ERR(mmc->supply.vqmmc)) { 2560 ret = mmc_regulator_set_vqmmc(mmc, ios); 2561 if (ret < 0) { 2562 pr_warn("%s: Switching to 1.8V signalling voltage failed\n", 2563 mmc_hostname(mmc)); 2564 return -EIO; 2565 } 2566 } 2567 2568 /* 2569 * Enable 1.8V Signal Enable in the Host Control2 2570 * register 2571 */ 2572 ctrl |= SDHCI_CTRL_VDD_180; 2573 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2574 2575 /* Some controllers need to do more when switching */ 2576 if (host->ops->voltage_switch) 2577 host->ops->voltage_switch(host); 2578 2579 /* 1.8V regulator output should be stable within 5 ms */ 2580 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2581 if (ctrl & SDHCI_CTRL_VDD_180) 2582 return 0; 2583 2584 pr_warn("%s: 1.8V regulator output did not become stable\n", 2585 mmc_hostname(mmc)); 2586 2587 return -EAGAIN; 2588 case MMC_SIGNAL_VOLTAGE_120: 2589 if (!(host->flags & SDHCI_SIGNALING_120)) 2590 return -EINVAL; 2591 if (!IS_ERR(mmc->supply.vqmmc)) { 2592 ret = mmc_regulator_set_vqmmc(mmc, ios); 2593 if (ret < 0) { 2594 pr_warn("%s: Switching to 1.2V signalling voltage failed\n", 2595 mmc_hostname(mmc)); 2596 return -EIO; 2597 } 2598 } 2599 return 0; 2600 default: 2601 /* No signal voltage switch required */ 2602 return 0; 2603 } 2604 } 2605 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch); 2606 2607 static int sdhci_card_busy(struct mmc_host *mmc) 2608 { 2609 struct sdhci_host *host = mmc_priv(mmc); 2610 u32 present_state; 2611 2612 /* Check whether DAT[0] is 0 */ 2613 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 2614 2615 return !(present_state & SDHCI_DATA_0_LVL_MASK); 2616 } 2617 2618 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 2619 { 2620 struct sdhci_host *host = mmc_priv(mmc); 2621 unsigned long flags; 2622 2623 spin_lock_irqsave(&host->lock, flags); 2624 host->flags |= SDHCI_HS400_TUNING; 2625 spin_unlock_irqrestore(&host->lock, flags); 2626 2627 return 0; 2628 } 2629 2630 void sdhci_start_tuning(struct sdhci_host *host) 2631 { 2632 u16 ctrl; 2633 2634 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2635 ctrl |= SDHCI_CTRL_EXEC_TUNING; 2636 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND) 2637 ctrl |= SDHCI_CTRL_TUNED_CLK; 2638 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2639 2640 /* 2641 * As per the Host Controller spec v3.00, the tuning command 2642 * generates a Buffer Read Ready interrupt, so enable that.
2643 * 2644 * Note: The spec clearly says that when tuning sequence 2645 * is being performed, the controller does not generate 2646 * interrupts other than Buffer Read Ready interrupt. But 2647 * to make sure we don't hit a controller bug, we _only_ 2648 * enable Buffer Read Ready interrupt here. 2649 */ 2650 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 2651 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 2652 } 2653 EXPORT_SYMBOL_GPL(sdhci_start_tuning); 2654 2655 void sdhci_end_tuning(struct sdhci_host *host) 2656 { 2657 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2658 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2659 } 2660 EXPORT_SYMBOL_GPL(sdhci_end_tuning); 2661 2662 void sdhci_reset_tuning(struct sdhci_host *host) 2663 { 2664 u16 ctrl; 2665 2666 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2667 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2668 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2669 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2670 } 2671 EXPORT_SYMBOL_GPL(sdhci_reset_tuning); 2672 2673 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) 2674 { 2675 sdhci_reset_tuning(host); 2676 2677 sdhci_do_reset(host, SDHCI_RESET_CMD); 2678 sdhci_do_reset(host, SDHCI_RESET_DATA); 2679 2680 sdhci_end_tuning(host); 2681 2682 mmc_abort_tuning(host->mmc, opcode); 2683 } 2684 EXPORT_SYMBOL_GPL(sdhci_abort_tuning); 2685 2686 /* 2687 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI 2688 * tuning command does not have a data payload (or rather the hardware does it 2689 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command 2690 * interrupt setup is different to other commands and there is no timeout 2691 * interrupt so special handling is needed. 2692 */ 2693 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) 2694 { 2695 struct mmc_host *mmc = host->mmc; 2696 struct mmc_command cmd = {}; 2697 struct mmc_request mrq = {}; 2698 unsigned long flags; 2699 u32 b = host->sdma_boundary; 2700 2701 spin_lock_irqsave(&host->lock, flags); 2702 2703 cmd.opcode = opcode; 2704 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2705 cmd.mrq = &mrq; 2706 2707 mrq.cmd = &cmd; 2708 /* 2709 * In response to CMD19, the card sends 64 bytes of tuning 2710 * block to the Host Controller. So we set the block size 2711 * to 64 here. 2712 */ 2713 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && 2714 mmc->ios.bus_width == MMC_BUS_WIDTH_8) 2715 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); 2716 else 2717 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); 2718 2719 /* 2720 * The tuning block is sent by the card to the host controller. 2721 * So we set the TRNS_READ bit in the Transfer Mode register. 2722 * This also takes care of setting DMA Enable and Multi Block 2723 * Select in the same register to 0. 
2724 */ 2725 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 2726 2727 if (!sdhci_send_command_retry(host, &cmd, flags)) { 2728 spin_unlock_irqrestore(&host->lock, flags); 2729 host->tuning_done = 0; 2730 return; 2731 } 2732 2733 host->cmd = NULL; 2734 2735 sdhci_del_timer(host, &mrq); 2736 2737 host->tuning_done = 0; 2738 2739 spin_unlock_irqrestore(&host->lock, flags); 2740 2741 /* Wait for Buffer Read Ready interrupt */ 2742 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2743 msecs_to_jiffies(50)); 2744 2745 } 2746 EXPORT_SYMBOL_GPL(sdhci_send_tuning); 2747 2748 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2749 { 2750 int i; 2751 2752 /* 2753 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number 2754 * of loops reaches tuning loop count. 2755 */ 2756 for (i = 0; i < host->tuning_loop_count; i++) { 2757 u16 ctrl; 2758 2759 sdhci_send_tuning(host, opcode); 2760 2761 if (!host->tuning_done) { 2762 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", 2763 mmc_hostname(host->mmc)); 2764 sdhci_abort_tuning(host, opcode); 2765 return -ETIMEDOUT; 2766 } 2767 2768 /* Spec does not require a delay between tuning cycles */ 2769 if (host->tuning_delay > 0) 2770 mdelay(host->tuning_delay); 2771 2772 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2773 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { 2774 if (ctrl & SDHCI_CTRL_TUNED_CLK) 2775 return 0; /* Success! */ 2776 break; 2777 } 2778 2779 } 2780 2781 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2782 mmc_hostname(host->mmc)); 2783 sdhci_reset_tuning(host); 2784 return -EAGAIN; 2785 } 2786 2787 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2788 { 2789 struct sdhci_host *host = mmc_priv(mmc); 2790 int err = 0; 2791 unsigned int tuning_count = 0; 2792 bool hs400_tuning; 2793 2794 hs400_tuning = host->flags & SDHCI_HS400_TUNING; 2795 2796 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 2797 tuning_count = host->tuning_count; 2798 2799 /* 2800 * The Host Controller needs tuning in case of SDR104 and DDR50 2801 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in 2802 * the Capabilities register. 2803 * If the Host Controller supports the HS200 mode then the 2804 * tuning function has to be executed. 2805 */ 2806 switch (host->timing) { 2807 /* HS400 tuning is done in HS200 mode */ 2808 case MMC_TIMING_MMC_HS400: 2809 err = -EINVAL; 2810 goto out; 2811 2812 case MMC_TIMING_MMC_HS200: 2813 /* 2814 * Periodic re-tuning for HS400 is not expected to be needed, so 2815 * disable it here. 
2816 */ 2817 if (hs400_tuning) 2818 tuning_count = 0; 2819 break; 2820 2821 case MMC_TIMING_UHS_SDR104: 2822 case MMC_TIMING_UHS_DDR50: 2823 break; 2824 2825 case MMC_TIMING_UHS_SDR50: 2826 if (host->flags & SDHCI_SDR50_NEEDS_TUNING) 2827 break; 2828 /* FALLTHROUGH */ 2829 2830 default: 2831 goto out; 2832 } 2833 2834 if (host->ops->platform_execute_tuning) { 2835 err = host->ops->platform_execute_tuning(host, opcode); 2836 goto out; 2837 } 2838 2839 host->mmc->retune_period = tuning_count; 2840 2841 if (host->tuning_delay < 0) 2842 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; 2843 2844 sdhci_start_tuning(host); 2845 2846 host->tuning_err = __sdhci_execute_tuning(host, opcode); 2847 2848 sdhci_end_tuning(host); 2849 out: 2850 host->flags &= ~SDHCI_HS400_TUNING; 2851 2852 return err; 2853 } 2854 EXPORT_SYMBOL_GPL(sdhci_execute_tuning); 2855 2856 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2857 { 2858 /* Host Controller v3.00 defines preset value registers */ 2859 if (host->version < SDHCI_SPEC_300) 2860 return; 2861 2862 /* 2863 * We only enable or disable Preset Value if they are not already 2864 * enabled or disabled respectively. Otherwise, we bail out. 2865 */ 2866 if (host->preset_enabled != enable) { 2867 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2868 2869 if (enable) 2870 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2871 else 2872 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2873 2874 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2875 2876 if (enable) 2877 host->flags |= SDHCI_PV_ENABLED; 2878 else 2879 host->flags &= ~SDHCI_PV_ENABLED; 2880 2881 host->preset_enabled = enable; 2882 } 2883 } 2884 2885 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 2886 int err) 2887 { 2888 struct sdhci_host *host = mmc_priv(mmc); 2889 struct mmc_data *data = mrq->data; 2890 2891 if (data->host_cookie != COOKIE_UNMAPPED) 2892 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 2893 mmc_get_dma_dir(data)); 2894 2895 data->host_cookie = COOKIE_UNMAPPED; 2896 } 2897 2898 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 2899 { 2900 struct sdhci_host *host = mmc_priv(mmc); 2901 2902 mrq->data->host_cookie = COOKIE_UNMAPPED; 2903 2904 /* 2905 * No pre-mapping in the pre hook if we're using the bounce buffer, 2906 * for that we would need two bounce buffers since one buffer is 2907 * in flight when this is getting called. 
2908 */ 2909 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) 2910 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 2911 } 2912 2913 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) 2914 { 2915 if (host->data_cmd) { 2916 host->data_cmd->error = err; 2917 sdhci_finish_mrq(host, host->data_cmd->mrq); 2918 } 2919 2920 if (host->cmd) { 2921 host->cmd->error = err; 2922 sdhci_finish_mrq(host, host->cmd->mrq); 2923 } 2924 } 2925 2926 static void sdhci_card_event(struct mmc_host *mmc) 2927 { 2928 struct sdhci_host *host = mmc_priv(mmc); 2929 unsigned long flags; 2930 int present; 2931 2932 /* First check if client has provided their own card event */ 2933 if (host->ops->card_event) 2934 host->ops->card_event(host); 2935 2936 present = mmc->ops->get_cd(mmc); 2937 2938 spin_lock_irqsave(&host->lock, flags); 2939 2940 /* Check sdhci_has_requests() first in case we are runtime suspended */ 2941 if (sdhci_has_requests(host) && !present) { 2942 pr_err("%s: Card removed during transfer!\n", 2943 mmc_hostname(host->mmc)); 2944 pr_err("%s: Resetting controller.\n", 2945 mmc_hostname(host->mmc)); 2946 2947 sdhci_do_reset(host, SDHCI_RESET_CMD); 2948 sdhci_do_reset(host, SDHCI_RESET_DATA); 2949 2950 sdhci_error_out_mrqs(host, -ENOMEDIUM); 2951 } 2952 2953 spin_unlock_irqrestore(&host->lock, flags); 2954 } 2955 2956 static const struct mmc_host_ops sdhci_ops = { 2957 .request = sdhci_request, 2958 .post_req = sdhci_post_req, 2959 .pre_req = sdhci_pre_req, 2960 .set_ios = sdhci_set_ios, 2961 .get_cd = sdhci_get_cd, 2962 .get_ro = sdhci_get_ro, 2963 .hw_reset = sdhci_hw_reset, 2964 .enable_sdio_irq = sdhci_enable_sdio_irq, 2965 .ack_sdio_irq = sdhci_ack_sdio_irq, 2966 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 2967 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 2968 .execute_tuning = sdhci_execute_tuning, 2969 .card_event = sdhci_card_event, 2970 .card_busy = sdhci_card_busy, 2971 }; 2972 2973 /*****************************************************************************\ 2974 * * 2975 * Request done * 2976 * * 2977 \*****************************************************************************/ 2978 2979 static bool sdhci_request_done(struct sdhci_host *host) 2980 { 2981 unsigned long flags; 2982 struct mmc_request *mrq; 2983 int i; 2984 2985 spin_lock_irqsave(&host->lock, flags); 2986 2987 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 2988 mrq = host->mrqs_done[i]; 2989 if (mrq) 2990 break; 2991 } 2992 2993 if (!mrq) { 2994 spin_unlock_irqrestore(&host->lock, flags); 2995 return true; 2996 } 2997 2998 /* 2999 * Always unmap the data buffers if they were mapped by 3000 * sdhci_prepare_data() whenever we finish with a request. 3001 * This avoids leaking DMA mappings on error. 
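 * Buffers that were pre-mapped in the ->pre_req() hook (COOKIE_PRE_MAPPED)
 * are deliberately left alone here; sdhci_post_req() unmaps those.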
3002 */ 3003 if (host->flags & SDHCI_REQ_USE_DMA) { 3004 struct mmc_data *data = mrq->data; 3005 3006 if (host->use_external_dma && data && 3007 (mrq->cmd->error || data->error)) { 3008 struct dma_chan *chan = sdhci_external_dma_channel(host, data); 3009 3010 host->mrqs_done[i] = NULL; 3011 spin_unlock_irqrestore(&host->lock, flags); 3012 dmaengine_terminate_sync(chan); 3013 spin_lock_irqsave(&host->lock, flags); 3014 sdhci_set_mrq_done(host, mrq); 3015 } 3016 3017 if (data && data->host_cookie == COOKIE_MAPPED) { 3018 if (host->bounce_buffer) { 3019 /* 3020 * On reads, copy the bounced data into the 3021 * sglist 3022 */ 3023 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 3024 unsigned int length = data->bytes_xfered; 3025 3026 if (length > host->bounce_buffer_size) { 3027 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 3028 mmc_hostname(host->mmc), 3029 host->bounce_buffer_size, 3030 data->bytes_xfered); 3031 /* Cap it down and continue */ 3032 length = host->bounce_buffer_size; 3033 } 3034 dma_sync_single_for_cpu( 3035 host->mmc->parent, 3036 host->bounce_addr, 3037 host->bounce_buffer_size, 3038 DMA_FROM_DEVICE); 3039 sg_copy_from_buffer(data->sg, 3040 data->sg_len, 3041 host->bounce_buffer, 3042 length); 3043 } else { 3044 /* No copying, just switch ownership */ 3045 dma_sync_single_for_cpu( 3046 host->mmc->parent, 3047 host->bounce_addr, 3048 host->bounce_buffer_size, 3049 mmc_get_dma_dir(data)); 3050 } 3051 } else { 3052 /* Unmap the raw data */ 3053 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 3054 data->sg_len, 3055 mmc_get_dma_dir(data)); 3056 } 3057 data->host_cookie = COOKIE_UNMAPPED; 3058 } 3059 } 3060 3061 /* 3062 * The controller needs a reset of internal state machines 3063 * upon error conditions. 3064 */ 3065 if (sdhci_needs_reset(host, mrq)) { 3066 /* 3067 * Do not finish until command and data lines are available for 3068 * reset. Note there can only be one other mrq, so it cannot 3069 * also be in mrqs_done, otherwise host->cmd and host->data_cmd 3070 * would both be null. 3071 */ 3072 if (host->cmd || host->data_cmd) { 3073 spin_unlock_irqrestore(&host->lock, flags); 3074 return true; 3075 } 3076 3077 /* Some controllers need this kick or reset won't work here */ 3078 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 3079 /* This is to force an update */ 3080 host->ops->set_clock(host, host->clock); 3081 3082 /* Spec says we should do both at the same time, but Ricoh 3083 controllers do not like that. 
*/ 3084 sdhci_do_reset(host, SDHCI_RESET_CMD); 3085 sdhci_do_reset(host, SDHCI_RESET_DATA); 3086 3087 host->pending_reset = false; 3088 } 3089 3090 host->mrqs_done[i] = NULL; 3091 3092 spin_unlock_irqrestore(&host->lock, flags); 3093 3094 if (host->ops->request_done) 3095 host->ops->request_done(host, mrq); 3096 else 3097 mmc_request_done(host->mmc, mrq); 3098 3099 return false; 3100 } 3101 3102 static void sdhci_complete_work(struct work_struct *work) 3103 { 3104 struct sdhci_host *host = container_of(work, struct sdhci_host, 3105 complete_work); 3106 3107 while (!sdhci_request_done(host)) 3108 ; 3109 } 3110 3111 static void sdhci_timeout_timer(struct timer_list *t) 3112 { 3113 struct sdhci_host *host; 3114 unsigned long flags; 3115 3116 host = from_timer(host, t, timer); 3117 3118 spin_lock_irqsave(&host->lock, flags); 3119 3120 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { 3121 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", 3122 mmc_hostname(host->mmc)); 3123 sdhci_dumpregs(host); 3124 3125 host->cmd->error = -ETIMEDOUT; 3126 sdhci_finish_mrq(host, host->cmd->mrq); 3127 } 3128 3129 spin_unlock_irqrestore(&host->lock, flags); 3130 } 3131 3132 static void sdhci_timeout_data_timer(struct timer_list *t) 3133 { 3134 struct sdhci_host *host; 3135 unsigned long flags; 3136 3137 host = from_timer(host, t, data_timer); 3138 3139 spin_lock_irqsave(&host->lock, flags); 3140 3141 if (host->data || host->data_cmd || 3142 (host->cmd && sdhci_data_line_cmd(host->cmd))) { 3143 pr_err("%s: Timeout waiting for hardware interrupt.\n", 3144 mmc_hostname(host->mmc)); 3145 sdhci_dumpregs(host); 3146 3147 if (host->data) { 3148 host->data->error = -ETIMEDOUT; 3149 __sdhci_finish_data(host, true); 3150 queue_work(host->complete_wq, &host->complete_work); 3151 } else if (host->data_cmd) { 3152 host->data_cmd->error = -ETIMEDOUT; 3153 sdhci_finish_mrq(host, host->data_cmd->mrq); 3154 } else { 3155 host->cmd->error = -ETIMEDOUT; 3156 sdhci_finish_mrq(host, host->cmd->mrq); 3157 } 3158 } 3159 3160 spin_unlock_irqrestore(&host->lock, flags); 3161 } 3162 3163 /*****************************************************************************\ 3164 * * 3165 * Interrupt handling * 3166 * * 3167 \*****************************************************************************/ 3168 3169 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) 3170 { 3171 /* Handle auto-CMD12 error */ 3172 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { 3173 struct mmc_request *mrq = host->data_cmd->mrq; 3174 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3175 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 3176 SDHCI_INT_DATA_TIMEOUT : 3177 SDHCI_INT_DATA_CRC; 3178 3179 /* Treat auto-CMD12 error the same as data error */ 3180 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { 3181 *intmask_p |= data_err_bit; 3182 return; 3183 } 3184 } 3185 3186 if (!host->cmd) { 3187 /* 3188 * SDHCI recovers from errors by resetting the cmd and data 3189 * circuits. Until that is done, there very well might be more 3190 * interrupts, so ignore them in that case. 
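 * The pending_reset flag remains set until sdhci_request_done() has issued
 * the CMD/DATA resets, so command interrupts seen in that window are ignored.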
3191 */ 3192 if (host->pending_reset) 3193 return; 3194 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 3195 mmc_hostname(host->mmc), (unsigned)intmask); 3196 sdhci_dumpregs(host); 3197 return; 3198 } 3199 3200 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 3201 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 3202 if (intmask & SDHCI_INT_TIMEOUT) 3203 host->cmd->error = -ETIMEDOUT; 3204 else 3205 host->cmd->error = -EILSEQ; 3206 3207 /* Treat data command CRC error the same as data CRC error */ 3208 if (host->cmd->data && 3209 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 3210 SDHCI_INT_CRC) { 3211 host->cmd = NULL; 3212 *intmask_p |= SDHCI_INT_DATA_CRC; 3213 return; 3214 } 3215 3216 __sdhci_finish_mrq(host, host->cmd->mrq); 3217 return; 3218 } 3219 3220 /* Handle auto-CMD23 error */ 3221 if (intmask & SDHCI_INT_AUTO_CMD_ERR) { 3222 struct mmc_request *mrq = host->cmd->mrq; 3223 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3224 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 3225 -ETIMEDOUT : 3226 -EILSEQ; 3227 3228 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { 3229 mrq->sbc->error = err; 3230 __sdhci_finish_mrq(host, mrq); 3231 return; 3232 } 3233 } 3234 3235 if (intmask & SDHCI_INT_RESPONSE) 3236 sdhci_finish_command(host); 3237 } 3238 3239 static void sdhci_adma_show_error(struct sdhci_host *host) 3240 { 3241 void *desc = host->adma_table; 3242 dma_addr_t dma = host->adma_addr; 3243 3244 sdhci_dumpregs(host); 3245 3246 while (true) { 3247 struct sdhci_adma2_64_desc *dma_desc = desc; 3248 3249 if (host->flags & SDHCI_USE_64_BIT_DMA) 3250 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 3251 (unsigned long long)dma, 3252 le32_to_cpu(dma_desc->addr_hi), 3253 le32_to_cpu(dma_desc->addr_lo), 3254 le16_to_cpu(dma_desc->len), 3255 le16_to_cpu(dma_desc->cmd)); 3256 else 3257 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 3258 (unsigned long long)dma, 3259 le32_to_cpu(dma_desc->addr_lo), 3260 le16_to_cpu(dma_desc->len), 3261 le16_to_cpu(dma_desc->cmd)); 3262 3263 desc += host->desc_sz; 3264 dma += host->desc_sz; 3265 3266 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 3267 break; 3268 } 3269 } 3270 3271 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 3272 { 3273 u32 command; 3274 3275 /* CMD19 generates _only_ Buffer Read Ready interrupt */ 3276 if (intmask & SDHCI_INT_DATA_AVAIL) { 3277 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); 3278 if (command == MMC_SEND_TUNING_BLOCK || 3279 command == MMC_SEND_TUNING_BLOCK_HS200) { 3280 host->tuning_done = 1; 3281 wake_up(&host->buf_ready_int); 3282 return; 3283 } 3284 } 3285 3286 if (!host->data) { 3287 struct mmc_command *data_cmd = host->data_cmd; 3288 3289 /* 3290 * The "data complete" interrupt is also used to 3291 * indicate that a busy state has ended. See comment 3292 * above in sdhci_cmd_irq(). 3293 */ 3294 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 3295 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3296 host->data_cmd = NULL; 3297 data_cmd->error = -ETIMEDOUT; 3298 __sdhci_finish_mrq(host, data_cmd->mrq); 3299 return; 3300 } 3301 if (intmask & SDHCI_INT_DATA_END) { 3302 host->data_cmd = NULL; 3303 /* 3304 * Some cards handle busy-end interrupt 3305 * before the command completed, so make 3306 * sure we do things in the proper order. 
3307 */ 3308 if (host->cmd == data_cmd) 3309 return; 3310 3311 __sdhci_finish_mrq(host, data_cmd->mrq); 3312 return; 3313 } 3314 } 3315 3316 /* 3317 * SDHCI recovers from errors by resetting the cmd and data 3318 * circuits. Until that is done, there very well might be more 3319 * interrupts, so ignore them in that case. 3320 */ 3321 if (host->pending_reset) 3322 return; 3323 3324 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 3325 mmc_hostname(host->mmc), (unsigned)intmask); 3326 sdhci_dumpregs(host); 3327 3328 return; 3329 } 3330 3331 if (intmask & SDHCI_INT_DATA_TIMEOUT) 3332 host->data->error = -ETIMEDOUT; 3333 else if (intmask & SDHCI_INT_DATA_END_BIT) 3334 host->data->error = -EILSEQ; 3335 else if ((intmask & SDHCI_INT_DATA_CRC) && 3336 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 3337 != MMC_BUS_TEST_R) 3338 host->data->error = -EILSEQ; 3339 else if (intmask & SDHCI_INT_ADMA_ERROR) { 3340 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), 3341 intmask); 3342 sdhci_adma_show_error(host); 3343 host->data->error = -EIO; 3344 if (host->ops->adma_workaround) 3345 host->ops->adma_workaround(host, intmask); 3346 } 3347 3348 if (host->data->error) 3349 sdhci_finish_data(host); 3350 else { 3351 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 3352 sdhci_transfer_pio(host); 3353 3354 /* 3355 * We currently don't do anything fancy with DMA 3356 * boundaries, but as we can't disable the feature 3357 * we need to at least restart the transfer. 3358 * 3359 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 3360 * should return a valid address to continue from, but as 3361 * some controllers are faulty, don't trust them. 3362 */ 3363 if (intmask & SDHCI_INT_DMA_END) { 3364 dma_addr_t dmastart, dmanow; 3365 3366 dmastart = sdhci_sdma_address(host); 3367 dmanow = dmastart + host->data->bytes_xfered; 3368 /* 3369 * Force update to the next DMA block boundary. 3370 */ 3371 dmanow = (dmanow & 3372 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 3373 SDHCI_DEFAULT_BOUNDARY_SIZE; 3374 host->data->bytes_xfered = dmanow - dmastart; 3375 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", 3376 &dmastart, host->data->bytes_xfered, &dmanow); 3377 sdhci_set_sdma_addr(host, dmanow); 3378 } 3379 3380 if (intmask & SDHCI_INT_DATA_END) { 3381 if (host->cmd == host->data_cmd) { 3382 /* 3383 * Data managed to finish before the 3384 * command completed. Make sure we do 3385 * things in the proper order. 
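 * The data_early flag is picked up again once the command completes, and
 * the data transfer is then finished from sdhci_finish_command().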
3386 */ 3387 host->data_early = 1; 3388 } else { 3389 sdhci_finish_data(host); 3390 } 3391 } 3392 } 3393 } 3394 3395 static inline bool sdhci_defer_done(struct sdhci_host *host, 3396 struct mmc_request *mrq) 3397 { 3398 struct mmc_data *data = mrq->data; 3399 3400 return host->pending_reset || host->always_defer_done || 3401 ((host->flags & SDHCI_REQ_USE_DMA) && data && 3402 data->host_cookie == COOKIE_MAPPED); 3403 } 3404 3405 static irqreturn_t sdhci_irq(int irq, void *dev_id) 3406 { 3407 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0}; 3408 irqreturn_t result = IRQ_NONE; 3409 struct sdhci_host *host = dev_id; 3410 u32 intmask, mask, unexpected = 0; 3411 int max_loops = 16; 3412 int i; 3413 3414 spin_lock(&host->lock); 3415 3416 if (host->runtime_suspended) { 3417 spin_unlock(&host->lock); 3418 return IRQ_NONE; 3419 } 3420 3421 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3422 if (!intmask || intmask == 0xffffffff) { 3423 result = IRQ_NONE; 3424 goto out; 3425 } 3426 3427 do { 3428 DBG("IRQ status 0x%08x\n", intmask); 3429 3430 if (host->ops->irq) { 3431 intmask = host->ops->irq(host, intmask); 3432 if (!intmask) 3433 goto cont; 3434 } 3435 3436 /* Clear selected interrupts. */ 3437 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3438 SDHCI_INT_BUS_POWER); 3439 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3440 3441 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3442 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 3443 SDHCI_CARD_PRESENT; 3444 3445 /* 3446 * There is an observation on the i.MX eSDHC: the INSERT 3447 * bit will immediately be set again when it gets 3448 * cleared, if a card is inserted. We have to mask 3449 * the irq to prevent an interrupt storm which would 3450 * freeze the system. The REMOVE bit behaves the 3451 * same way. 3452 * 3453 * More testing is needed here to ensure it works 3454 * for other platforms though. 3455 */ 3456 host->ier &= ~(SDHCI_INT_CARD_INSERT | 3457 SDHCI_INT_CARD_REMOVE); 3458 host->ier |= present ?
SDHCI_INT_CARD_REMOVE : 3459 SDHCI_INT_CARD_INSERT; 3460 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3461 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3462 3463 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 3464 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 3465 3466 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 3467 SDHCI_INT_CARD_REMOVE); 3468 result = IRQ_WAKE_THREAD; 3469 } 3470 3471 if (intmask & SDHCI_INT_CMD_MASK) 3472 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask); 3473 3474 if (intmask & SDHCI_INT_DATA_MASK) 3475 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 3476 3477 if (intmask & SDHCI_INT_BUS_POWER) 3478 pr_err("%s: Card is consuming too much power!\n", 3479 mmc_hostname(host->mmc)); 3480 3481 if (intmask & SDHCI_INT_RETUNE) 3482 mmc_retune_needed(host->mmc); 3483 3484 if ((intmask & SDHCI_INT_CARD_INT) && 3485 (host->ier & SDHCI_INT_CARD_INT)) { 3486 sdhci_enable_sdio_irq_nolock(host, false); 3487 sdio_signal_irq(host->mmc); 3488 } 3489 3490 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 3491 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3492 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 3493 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); 3494 3495 if (intmask) { 3496 unexpected |= intmask; 3497 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3498 } 3499 cont: 3500 if (result == IRQ_NONE) 3501 result = IRQ_HANDLED; 3502 3503 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3504 } while (intmask && --max_loops); 3505 3506 /* Determine if mrqs can be completed immediately */ 3507 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3508 struct mmc_request *mrq = host->mrqs_done[i]; 3509 3510 if (!mrq) 3511 continue; 3512 3513 if (sdhci_defer_done(host, mrq)) { 3514 result = IRQ_WAKE_THREAD; 3515 } else { 3516 mrqs_done[i] = mrq; 3517 host->mrqs_done[i] = NULL; 3518 } 3519 } 3520 out: 3521 if (host->deferred_cmd) 3522 result = IRQ_WAKE_THREAD; 3523 3524 spin_unlock(&host->lock); 3525 3526 /* Process mrqs ready for immediate completion */ 3527 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3528 if (!mrqs_done[i]) 3529 continue; 3530 3531 if (host->ops->request_done) 3532 host->ops->request_done(host, mrqs_done[i]); 3533 else 3534 mmc_request_done(host->mmc, mrqs_done[i]); 3535 } 3536 3537 if (unexpected) { 3538 pr_err("%s: Unexpected interrupt 0x%08x.\n", 3539 mmc_hostname(host->mmc), unexpected); 3540 sdhci_dumpregs(host); 3541 } 3542 3543 return result; 3544 } 3545 3546 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3547 { 3548 struct sdhci_host *host = dev_id; 3549 struct mmc_command *cmd; 3550 unsigned long flags; 3551 u32 isr; 3552 3553 while (!sdhci_request_done(host)) 3554 ; 3555 3556 spin_lock_irqsave(&host->lock, flags); 3557 3558 isr = host->thread_isr; 3559 host->thread_isr = 0; 3560 3561 cmd = host->deferred_cmd; 3562 if (cmd && !sdhci_send_command_retry(host, cmd, flags)) 3563 sdhci_finish_mrq(host, cmd->mrq); 3564 3565 spin_unlock_irqrestore(&host->lock, flags); 3566 3567 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3568 struct mmc_host *mmc = host->mmc; 3569 3570 mmc->ops->card_event(mmc); 3571 mmc_detect_change(mmc, msecs_to_jiffies(200)); 3572 } 3573 3574 return IRQ_HANDLED; 3575 } 3576 3577 /*****************************************************************************\ 3578 * * 3579 * Suspend/resume * 3580 * * 3581 \*****************************************************************************/ 3582 3583 #ifdef CONFIG_PM 3584 3585 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host) 3586 { 3587 return 
mmc_card_is_removable(host->mmc) && 3588 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3589 !mmc_can_gpio_cd(host->mmc); 3590 } 3591 3592 /* 3593 * To enable wakeup events, the corresponding events have to be enabled in 3594 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal 3595 * Table' in the SD Host Controller Standard Specification. 3596 * It is useless to restore SDHCI_INT_ENABLE state in 3597 * sdhci_disable_irq_wakeups() since it will be set by 3598 * sdhci_enable_card_detection() or sdhci_init(). 3599 */ 3600 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) 3601 { 3602 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | 3603 SDHCI_WAKE_ON_INT; 3604 u32 irq_val = 0; 3605 u8 wake_val = 0; 3606 u8 val; 3607 3608 if (sdhci_cd_irq_can_wakeup(host)) { 3609 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; 3610 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; 3611 } 3612 3613 if (mmc_card_wake_sdio_irq(host->mmc)) { 3614 wake_val |= SDHCI_WAKE_ON_INT; 3615 irq_val |= SDHCI_INT_CARD_INT; 3616 } 3617 3618 if (!irq_val) 3619 return false; 3620 3621 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3622 val &= ~mask; 3623 val |= wake_val; 3624 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3625 3626 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); 3627 3628 host->irq_wake_enabled = !enable_irq_wake(host->irq); 3629 3630 return host->irq_wake_enabled; 3631 } 3632 3633 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 3634 { 3635 u8 val; 3636 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 3637 | SDHCI_WAKE_ON_INT; 3638 3639 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3640 val &= ~mask; 3641 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3642 3643 disable_irq_wake(host->irq); 3644 3645 host->irq_wake_enabled = false; 3646 } 3647 3648 int sdhci_suspend_host(struct sdhci_host *host) 3649 { 3650 sdhci_disable_card_detection(host); 3651 3652 mmc_retune_timer_stop(host->mmc); 3653 3654 if (!device_may_wakeup(mmc_dev(host->mmc)) || 3655 !sdhci_enable_irq_wakeups(host)) { 3656 host->ier = 0; 3657 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3658 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3659 free_irq(host->irq, host); 3660 } 3661 3662 return 0; 3663 } 3664 3665 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 3666 3667 int sdhci_resume_host(struct sdhci_host *host) 3668 { 3669 struct mmc_host *mmc = host->mmc; 3670 int ret = 0; 3671 3672 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3673 if (host->ops->enable_dma) 3674 host->ops->enable_dma(host); 3675 } 3676 3677 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && 3678 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 3679 /* Card keeps power but host controller does not */ 3680 sdhci_init(host, 0); 3681 host->pwr = 0; 3682 host->clock = 0; 3683 mmc->ops->set_ios(mmc, &mmc->ios); 3684 } else { 3685 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); 3686 } 3687 3688 if (host->irq_wake_enabled) { 3689 sdhci_disable_irq_wakeups(host); 3690 } else { 3691 ret = request_threaded_irq(host->irq, sdhci_irq, 3692 sdhci_thread_irq, IRQF_SHARED, 3693 mmc_hostname(host->mmc), host); 3694 if (ret) 3695 return ret; 3696 } 3697 3698 sdhci_enable_card_detection(host); 3699 3700 return ret; 3701 } 3702 3703 EXPORT_SYMBOL_GPL(sdhci_resume_host); 3704 3705 int sdhci_runtime_suspend_host(struct sdhci_host *host) 3706 { 3707 unsigned long flags; 3708 3709 mmc_retune_timer_stop(host->mmc); 3710 3711 spin_lock_irqsave(&host->lock, flags); 3712 host->ier &= SDHCI_INT_CARD_INT; 3713 
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3714 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3715 spin_unlock_irqrestore(&host->lock, flags); 3716 3717 synchronize_hardirq(host->irq); 3718 3719 spin_lock_irqsave(&host->lock, flags); 3720 host->runtime_suspended = true; 3721 spin_unlock_irqrestore(&host->lock, flags); 3722 3723 return 0; 3724 } 3725 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 3726 3727 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) 3728 { 3729 struct mmc_host *mmc = host->mmc; 3730 unsigned long flags; 3731 int host_flags = host->flags; 3732 3733 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3734 if (host->ops->enable_dma) 3735 host->ops->enable_dma(host); 3736 } 3737 3738 sdhci_init(host, soft_reset); 3739 3740 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && 3741 mmc->ios.power_mode != MMC_POWER_OFF) { 3742 /* Force clock and power re-programming */ 3743 host->pwr = 0; 3744 host->clock = 0; 3745 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); 3746 mmc->ops->set_ios(mmc, &mmc->ios); 3747 3748 if ((host_flags & SDHCI_PV_ENABLED) && 3749 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { 3750 spin_lock_irqsave(&host->lock, flags); 3751 sdhci_enable_preset_value(host, true); 3752 spin_unlock_irqrestore(&host->lock, flags); 3753 } 3754 3755 if ((mmc->caps2 & MMC_CAP2_HS400_ES) && 3756 mmc->ops->hs400_enhanced_strobe) 3757 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); 3758 } 3759 3760 spin_lock_irqsave(&host->lock, flags); 3761 3762 host->runtime_suspended = false; 3763 3764 /* Enable SDIO IRQ */ 3765 if (sdio_irq_claimed(mmc)) 3766 sdhci_enable_sdio_irq_nolock(host, true); 3767 3768 /* Enable Card Detection */ 3769 sdhci_enable_card_detection(host); 3770 3771 spin_unlock_irqrestore(&host->lock, flags); 3772 3773 return 0; 3774 } 3775 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 3776 3777 #endif /* CONFIG_PM */ 3778 3779 /*****************************************************************************\ 3780 * * 3781 * Command Queue Engine (CQE) helpers * 3782 * * 3783 \*****************************************************************************/ 3784 3785 void sdhci_cqe_enable(struct mmc_host *mmc) 3786 { 3787 struct sdhci_host *host = mmc_priv(mmc); 3788 unsigned long flags; 3789 u8 ctrl; 3790 3791 spin_lock_irqsave(&host->lock, flags); 3792 3793 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 3794 ctrl &= ~SDHCI_CTRL_DMA_MASK; 3795 /* 3796 * Hosts from v4.10 support the ADMA3 DMA type. 3797 * ADMA3 uses integrated descriptors, which suit command queuing better 3798 * since both command and transfer descriptors can be fetched together.
3799 */ 3800 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) 3801 ctrl |= SDHCI_CTRL_ADMA3; 3802 else if (host->flags & SDHCI_USE_64_BIT_DMA) 3803 ctrl |= SDHCI_CTRL_ADMA64; 3804 else 3805 ctrl |= SDHCI_CTRL_ADMA32; 3806 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 3807 3808 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), 3809 SDHCI_BLOCK_SIZE); 3810 3811 /* Set maximum timeout */ 3812 sdhci_set_timeout(host, NULL); 3813 3814 host->ier = host->cqe_ier; 3815 3816 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3817 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3818 3819 host->cqe_on = true; 3820 3821 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", 3822 mmc_hostname(mmc), host->ier, 3823 sdhci_readl(host, SDHCI_INT_STATUS)); 3824 3825 spin_unlock_irqrestore(&host->lock, flags); 3826 } 3827 EXPORT_SYMBOL_GPL(sdhci_cqe_enable); 3828 3829 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) 3830 { 3831 struct sdhci_host *host = mmc_priv(mmc); 3832 unsigned long flags; 3833 3834 spin_lock_irqsave(&host->lock, flags); 3835 3836 sdhci_set_default_irqs(host); 3837 3838 host->cqe_on = false; 3839 3840 if (recovery) { 3841 sdhci_do_reset(host, SDHCI_RESET_CMD); 3842 sdhci_do_reset(host, SDHCI_RESET_DATA); 3843 } 3844 3845 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", 3846 mmc_hostname(mmc), host->ier, 3847 sdhci_readl(host, SDHCI_INT_STATUS)); 3848 3849 spin_unlock_irqrestore(&host->lock, flags); 3850 } 3851 EXPORT_SYMBOL_GPL(sdhci_cqe_disable); 3852 3853 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, 3854 int *data_error) 3855 { 3856 u32 mask; 3857 3858 if (!host->cqe_on) 3859 return false; 3860 3861 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) 3862 *cmd_error = -EILSEQ; 3863 else if (intmask & SDHCI_INT_TIMEOUT) 3864 *cmd_error = -ETIMEDOUT; 3865 else 3866 *cmd_error = 0; 3867 3868 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) 3869 *data_error = -EILSEQ; 3870 else if (intmask & SDHCI_INT_DATA_TIMEOUT) 3871 *data_error = -ETIMEDOUT; 3872 else if (intmask & SDHCI_INT_ADMA_ERROR) 3873 *data_error = -EIO; 3874 else 3875 *data_error = 0; 3876 3877 /* Clear selected interrupts. 
*/ 3878 mask = intmask & host->cqe_ier; 3879 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3880 3881 if (intmask & SDHCI_INT_BUS_POWER) 3882 pr_err("%s: Card is consuming too much power!\n", 3883 mmc_hostname(host->mmc)); 3884 3885 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR); 3886 if (intmask) { 3887 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3888 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n", 3889 mmc_hostname(host->mmc), intmask); 3890 sdhci_dumpregs(host); 3891 } 3892 3893 return true; 3894 } 3895 EXPORT_SYMBOL_GPL(sdhci_cqe_irq); 3896 3897 /*****************************************************************************\ 3898 * * 3899 * Device allocation/registration * 3900 * * 3901 \*****************************************************************************/ 3902 3903 struct sdhci_host *sdhci_alloc_host(struct device *dev, 3904 size_t priv_size) 3905 { 3906 struct mmc_host *mmc; 3907 struct sdhci_host *host; 3908 3909 WARN_ON(dev == NULL); 3910 3911 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 3912 if (!mmc) 3913 return ERR_PTR(-ENOMEM); 3914 3915 host = mmc_priv(mmc); 3916 host->mmc = mmc; 3917 host->mmc_host_ops = sdhci_ops; 3918 mmc->ops = &host->mmc_host_ops; 3919 3920 host->flags = SDHCI_SIGNALING_330; 3921 3922 host->cqe_ier = SDHCI_CQE_INT_MASK; 3923 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK; 3924 3925 host->tuning_delay = -1; 3926 host->tuning_loop_count = MAX_TUNING_LOOP; 3927 3928 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; 3929 3930 /* 3931 * The DMA table descriptor count is calculated as the maximum 3932 * number of segments times 2, to allow for an alignment 3933 * descriptor for each segment, plus 1 for a nop end descriptor. 3934 */ 3935 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; 3936 3937 return host; 3938 } 3939 3940 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 3941 3942 static int sdhci_set_dma_mask(struct sdhci_host *host) 3943 { 3944 struct mmc_host *mmc = host->mmc; 3945 struct device *dev = mmc_dev(mmc); 3946 int ret = -EINVAL; 3947 3948 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 3949 host->flags &= ~SDHCI_USE_64_BIT_DMA; 3950 3951 /* Try 64-bit mask if hardware is capable of it */ 3952 if (host->flags & SDHCI_USE_64_BIT_DMA) { 3953 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 3954 if (ret) { 3955 pr_warn("%s: Failed to set 64-bit DMA mask.\n", 3956 mmc_hostname(mmc)); 3957 host->flags &= ~SDHCI_USE_64_BIT_DMA; 3958 } 3959 } 3960 3961 /* 32-bit mask as default & fallback */ 3962 if (ret) { 3963 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 3964 if (ret) 3965 pr_warn("%s: Failed to set 32-bit DMA mask.\n", 3966 mmc_hostname(mmc)); 3967 } 3968 3969 return ret; 3970 } 3971 3972 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver, 3973 const u32 *caps, const u32 *caps1) 3974 { 3975 u16 v; 3976 u64 dt_caps_mask = 0; 3977 u64 dt_caps = 0; 3978 3979 if (host->read_caps) 3980 return; 3981 3982 host->read_caps = true; 3983 3984 if (debug_quirks) 3985 host->quirks = debug_quirks; 3986 3987 if (debug_quirks2) 3988 host->quirks2 = debug_quirks2; 3989 3990 sdhci_do_reset(host, SDHCI_RESET_ALL); 3991 3992 if (host->v4_mode) 3993 sdhci_do_enable_v4_mode(host); 3994 3995 of_property_read_u64(mmc_dev(host->mmc)->of_node, 3996 "sdhci-caps-mask", &dt_caps_mask); 3997 of_property_read_u64(mmc_dev(host->mmc)->of_node, 3998 "sdhci-caps", &dt_caps); 3999 4000 v = ver ? 
*ver : sdhci_readw(host, SDHCI_HOST_VERSION); 4001 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; 4002 4003 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS) 4004 return; 4005 4006 if (caps) { 4007 host->caps = *caps; 4008 } else { 4009 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); 4010 host->caps &= ~lower_32_bits(dt_caps_mask); 4011 host->caps |= lower_32_bits(dt_caps); 4012 } 4013 4014 if (host->version < SDHCI_SPEC_300) 4015 return; 4016 4017 if (caps1) { 4018 host->caps1 = *caps1; 4019 } else { 4020 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); 4021 host->caps1 &= ~upper_32_bits(dt_caps_mask); 4022 host->caps1 |= upper_32_bits(dt_caps); 4023 } 4024 } 4025 EXPORT_SYMBOL_GPL(__sdhci_read_caps); 4026 4027 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host) 4028 { 4029 struct mmc_host *mmc = host->mmc; 4030 unsigned int max_blocks; 4031 unsigned int bounce_size; 4032 int ret; 4033 4034 /* 4035 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer 4036 * has diminishing returns, this is probably because SD/MMC 4037 * cards are usually optimized to handle this size of requests. 4038 */ 4039 bounce_size = SZ_64K; 4040 /* 4041 * Adjust downwards to maximum request size if this is less 4042 * than our segment size, else hammer down the maximum 4043 * request size to the maximum buffer size. 4044 */ 4045 if (mmc->max_req_size < bounce_size) 4046 bounce_size = mmc->max_req_size; 4047 max_blocks = bounce_size / 512; 4048 4049 /* 4050 * When we just support one segment, we can get significant 4051 * speedups by the help of a bounce buffer to group scattered 4052 * reads/writes together. 4053 */ 4054 host->bounce_buffer = devm_kmalloc(mmc->parent, 4055 bounce_size, 4056 GFP_KERNEL); 4057 if (!host->bounce_buffer) { 4058 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", 4059 mmc_hostname(mmc), 4060 bounce_size); 4061 /* 4062 * Exiting with zero here makes sure we proceed with 4063 * mmc->max_segs == 1. 4064 */ 4065 return; 4066 } 4067 4068 host->bounce_addr = dma_map_single(mmc->parent, 4069 host->bounce_buffer, 4070 bounce_size, 4071 DMA_BIDIRECTIONAL); 4072 ret = dma_mapping_error(mmc->parent, host->bounce_addr); 4073 if (ret) 4074 /* Again fall back to max_segs == 1 */ 4075 return; 4076 host->bounce_buffer_size = bounce_size; 4077 4078 /* Lie about this since we're bouncing */ 4079 mmc->max_segs = max_blocks; 4080 mmc->max_seg_size = bounce_size; 4081 mmc->max_req_size = bounce_size; 4082 4083 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n", 4084 mmc_hostname(mmc), max_blocks, bounce_size); 4085 } 4086 4087 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host) 4088 { 4089 /* 4090 * According to SD Host Controller spec v4.10, bit[27] added from 4091 * version 4.10 in Capabilities Register is used as 64-bit System 4092 * Address support for V4 mode. 4093 */ 4094 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) 4095 return host->caps & SDHCI_CAN_64BIT_V4; 4096 4097 return host->caps & SDHCI_CAN_64BIT; 4098 } 4099 4100 int sdhci_setup_host(struct sdhci_host *host) 4101 { 4102 struct mmc_host *mmc; 4103 u32 max_current_caps; 4104 unsigned int ocr_avail; 4105 unsigned int override_timeout_clk; 4106 u32 max_clk; 4107 int ret = 0; 4108 bool enable_vqmmc = false; 4109 4110 WARN_ON(host == NULL); 4111 if (host == NULL) 4112 return -EINVAL; 4113 4114 mmc = host->mmc; 4115 4116 /* 4117 * If there are external regulators, get them. 
Note this must be done 4118 * early before resetting the host and reading the capabilities so that 4119 * the host can take the appropriate action if regulators are not 4120 * available. 4121 */ 4122 if (!mmc->supply.vqmmc) { 4123 ret = mmc_regulator_get_supply(mmc); 4124 if (ret) 4125 return ret; 4126 enable_vqmmc = true; 4127 } 4128 4129 DBG("Version: 0x%08x | Present: 0x%08x\n", 4130 sdhci_readw(host, SDHCI_HOST_VERSION), 4131 sdhci_readl(host, SDHCI_PRESENT_STATE)); 4132 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", 4133 sdhci_readl(host, SDHCI_CAPABILITIES), 4134 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 4135 4136 sdhci_read_caps(host); 4137 4138 override_timeout_clk = host->timeout_clk; 4139 4140 if (host->version > SDHCI_SPEC_420) { 4141 pr_err("%s: Unknown controller version (%d). You may experience problems.\n", 4142 mmc_hostname(mmc), host->version); 4143 } 4144 4145 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 4146 host->flags |= SDHCI_USE_SDMA; 4147 else if (!(host->caps & SDHCI_CAN_DO_SDMA)) 4148 DBG("Controller doesn't have SDMA capability\n"); 4149 else 4150 host->flags |= SDHCI_USE_SDMA; 4151 4152 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 4153 (host->flags & SDHCI_USE_SDMA)) { 4154 DBG("Disabling DMA as it is marked broken\n"); 4155 host->flags &= ~SDHCI_USE_SDMA; 4156 } 4157 4158 if ((host->version >= SDHCI_SPEC_200) && 4159 (host->caps & SDHCI_CAN_DO_ADMA2)) 4160 host->flags |= SDHCI_USE_ADMA; 4161 4162 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 4163 (host->flags & SDHCI_USE_ADMA)) { 4164 DBG("Disabling ADMA as it is marked broken\n"); 4165 host->flags &= ~SDHCI_USE_ADMA; 4166 } 4167 4168 if (sdhci_can_64bit_dma(host)) 4169 host->flags |= SDHCI_USE_64_BIT_DMA; 4170 4171 if (host->use_external_dma) { 4172 ret = sdhci_external_dma_init(host); 4173 if (ret == -EPROBE_DEFER) 4174 goto unreg; 4175 /* 4176 * Fall back to use the DMA/PIO integrated in standard SDHCI 4177 * instead of external DMA devices. 4178 */ 4179 else if (ret) 4180 sdhci_switch_external_dma(host, false); 4181 /* Disable internal DMA sources */ 4182 else 4183 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 4184 } 4185 4186 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 4187 if (host->ops->set_dma_mask) 4188 ret = host->ops->set_dma_mask(host); 4189 else 4190 ret = sdhci_set_dma_mask(host); 4191 4192 if (!ret && host->ops->enable_dma) 4193 ret = host->ops->enable_dma(host); 4194 4195 if (ret) { 4196 pr_warn("%s: No suitable DMA available - falling back to PIO\n", 4197 mmc_hostname(mmc)); 4198 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 4199 4200 ret = 0; 4201 } 4202 } 4203 4204 /* SDMA does not support 64-bit DMA if v4 mode not set */ 4205 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode) 4206 host->flags &= ~SDHCI_USE_SDMA; 4207 4208 if (host->flags & SDHCI_USE_ADMA) { 4209 dma_addr_t dma; 4210 void *buf; 4211 4212 if (!(host->flags & SDHCI_USE_64_BIT_DMA)) 4213 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ; 4214 else if (!host->alloc_desc_sz) 4215 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); 4216 4217 host->desc_sz = host->alloc_desc_sz; 4218 host->adma_table_sz = host->adma_table_cnt * host->desc_sz; 4219 4220 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; 4221 /* 4222 * Use zalloc to zero the reserved high 32-bits of 128-bit 4223 * descriptors so that they never need to be written. 
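 *
 * Illustrative layout of the single coherent allocation made just below
 * (sizes come from the values computed above; shown only as a sketch):
 *
 *	buf / dma                          start of allocation
 *	buf + 0 .. align_buffer_sz         alignment/bounce buffers
 *	                                   (SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN)
 *	buf + align_buffer_sz .. end       ADMA2 descriptor table
 *	                                   (adma_table_cnt * desc_sz bytes)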
4224 */ 4225 buf = dma_alloc_coherent(mmc_dev(mmc), 4226 host->align_buffer_sz + host->adma_table_sz, 4227 &dma, GFP_KERNEL); 4228 if (!buf) { 4229 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 4230 mmc_hostname(mmc)); 4231 host->flags &= ~SDHCI_USE_ADMA; 4232 } else if ((dma + host->align_buffer_sz) & 4233 (SDHCI_ADMA2_DESC_ALIGN - 1)) { 4234 pr_warn("%s: unable to allocate aligned ADMA descriptor\n", 4235 mmc_hostname(mmc)); 4236 host->flags &= ~SDHCI_USE_ADMA; 4237 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4238 host->adma_table_sz, buf, dma); 4239 } else { 4240 host->align_buffer = buf; 4241 host->align_addr = dma; 4242 4243 host->adma_table = buf + host->align_buffer_sz; 4244 host->adma_addr = dma + host->align_buffer_sz; 4245 } 4246 } 4247 4248 /* 4249 * If we use DMA, then it's up to the caller to set the DMA 4250 * mask, but PIO does not need the hw shim so we set a new 4251 * mask here in that case. 4252 */ 4253 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { 4254 host->dma_mask = DMA_BIT_MASK(64); 4255 mmc_dev(mmc)->dma_mask = &host->dma_mask; 4256 } 4257 4258 if (host->version >= SDHCI_SPEC_300) 4259 host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps); 4260 else 4261 host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps); 4262 4263 host->max_clk *= 1000000; 4264 if (host->max_clk == 0 || host->quirks & 4265 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 4266 if (!host->ops->get_max_clock) { 4267 pr_err("%s: Hardware doesn't specify base clock frequency.\n", 4268 mmc_hostname(mmc)); 4269 ret = -ENODEV; 4270 goto undma; 4271 } 4272 host->max_clk = host->ops->get_max_clock(host); 4273 } 4274 4275 /* 4276 * In case of Host Controller v3.00, find out whether clock 4277 * multiplier is supported. 4278 */ 4279 host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1); 4280 4281 /* 4282 * In case the value in Clock Multiplier is 0, then programmable 4283 * clock mode is not supported, otherwise the actual clock 4284 * multiplier is one more than the value of Clock Multiplier 4285 * in the Capabilities Register. 4286 */ 4287 if (host->clk_mul) 4288 host->clk_mul += 1; 4289 4290 /* 4291 * Set host parameters. 4292 */ 4293 max_clk = host->max_clk; 4294 4295 if (host->ops->get_min_clock) 4296 mmc->f_min = host->ops->get_min_clock(host); 4297 else if (host->version >= SDHCI_SPEC_300) { 4298 if (host->clk_mul) 4299 max_clk = host->max_clk * host->clk_mul; 4300 /* 4301 * Divided Clock Mode minimum clock rate is always less than 4302 * Programmable Clock Mode minimum clock rate. 4303 */ 4304 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 4305 } else 4306 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 4307 4308 if (!mmc->f_max || mmc->f_max > max_clk) 4309 mmc->f_max = max_clk; 4310 4311 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 4312 host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps); 4313 4314 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) 4315 host->timeout_clk *= 1000; 4316 4317 if (host->timeout_clk == 0) { 4318 if (!host->ops->get_timeout_clock) { 4319 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n", 4320 mmc_hostname(mmc)); 4321 ret = -ENODEV; 4322 goto undma; 4323 } 4324 4325 host->timeout_clk = 4326 DIV_ROUND_UP(host->ops->get_timeout_clock(host), 4327 1000); 4328 } 4329 4330 if (override_timeout_clk) 4331 host->timeout_clk = override_timeout_clk; 4332 4333 mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 
4334 host->ops->get_max_timeout_count(host) : 1 << 27; 4335 mmc->max_busy_timeout /= host->timeout_clk; 4336 } 4337 4338 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT && 4339 !host->ops->get_max_timeout_count) 4340 mmc->max_busy_timeout = 0; 4341 4342 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23; 4343 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 4344 4345 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 4346 host->flags |= SDHCI_AUTO_CMD12; 4347 4348 /* 4349 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO. 4350 * For v4 mode, SDMA may use Auto-CMD23 as well. 4351 */ 4352 if ((host->version >= SDHCI_SPEC_300) && 4353 ((host->flags & SDHCI_USE_ADMA) || 4354 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) && 4355 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { 4356 host->flags |= SDHCI_AUTO_CMD23; 4357 DBG("Auto-CMD23 available\n"); 4358 } else { 4359 DBG("Auto-CMD23 unavailable\n"); 4360 } 4361 4362 /* 4363 * A controller may support 8-bit width, but the board itself 4364 * might not have the pins brought out. Boards that support 4365 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 4366 * their platform code before calling sdhci_add_host(), and we 4367 * won't assume 8-bit width for hosts without that CAP. 4368 */ 4369 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 4370 mmc->caps |= MMC_CAP_4_BIT_DATA; 4371 4372 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) 4373 mmc->caps &= ~MMC_CAP_CMD23; 4374 4375 if (host->caps & SDHCI_CAN_DO_HISPD) 4376 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 4377 4378 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 4379 mmc_card_is_removable(mmc) && 4380 mmc_gpio_get_cd(host->mmc) < 0) 4381 mmc->caps |= MMC_CAP_NEEDS_POLL; 4382 4383 if (!IS_ERR(mmc->supply.vqmmc)) { 4384 if (enable_vqmmc) { 4385 ret = regulator_enable(mmc->supply.vqmmc); 4386 host->sdhci_core_to_disable_vqmmc = !ret; 4387 } 4388 4389 /* If vqmmc provides no 1.8V signalling, then there's no UHS */ 4390 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, 4391 1950000)) 4392 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | 4393 SDHCI_SUPPORT_SDR50 | 4394 SDHCI_SUPPORT_DDR50); 4395 4396 /* In eMMC case vqmmc might be a fixed 1.8V regulator */ 4397 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000, 4398 3600000)) 4399 host->flags &= ~SDHCI_SIGNALING_330; 4400 4401 if (ret) { 4402 pr_warn("%s: Failed to enable vqmmc regulator: %d\n", 4403 mmc_hostname(mmc), ret); 4404 mmc->supply.vqmmc = ERR_PTR(-EINVAL); 4405 } 4406 4407 } 4408 4409 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) { 4410 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 4411 SDHCI_SUPPORT_DDR50); 4412 /* 4413 * The SDHCI controller in a SoC might support HS200/HS400 4414 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property), 4415 * but if the board is modeled such that the IO lines are not 4416 * connected to 1.8v then HS200/HS400 cannot be supported. 4417 * Disable HS200/HS400 if the board does not have 1.8v connected 4418 * to the IO lines. (Applicable for other modes in 1.8v) 4419 */ 4420 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES); 4421 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS); 4422 } 4423 4424 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. 
 */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/*
		 * SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
				       host->caps1);

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);

	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
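 *
 * Illustrative worked example of the regulator fallback coded below
 * (numbers invented): a vmmc limit of 800000 uA reported by
 * regulator_get_current_limit() becomes 800000 / 1000 = 800 mA, then
 * 800 / SDHCI_MAX_CURRENT_MULTIPLIER (4) = 200 in register format; the
 * FIELD_GET() users further down multiply the field by 4 again, so
 * mmc->max_current_* ends up back at 800 mA.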
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);

	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);

		if (curr > 0) {
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;	/* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
				FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
				FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
		}
	}

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
						 max_current_caps) *
				       SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
						 max_current_caps) *
				       SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
						 max_current_caps) *
				       SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it the highest priority. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum request size in one transfer. Limited by the SDMA boundary
	 * size (512 KiB). Note some tuning modes impose a 4 MiB limit, but
	 * this is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
		if (swiotlb_max_segment()) {
			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
						    IO_TLB_SEGSIZE;
			mmc->max_req_size = min(mmc->max_req_size,
						max_req_size);
		}
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
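 *
 * (The 65535-byte cap applied below for
 * SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC presumably exists because a full
 * 64 KiB length is encoded as 0 in an ADMA2 descriptor, which such
 * controllers cannot handle.)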
4597 */ 4598 if (host->flags & SDHCI_USE_ADMA) { 4599 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) 4600 mmc->max_seg_size = 65535; 4601 else 4602 mmc->max_seg_size = 65536; 4603 } else { 4604 mmc->max_seg_size = mmc->max_req_size; 4605 } 4606 4607 /* 4608 * Maximum block size. This varies from controller to controller and 4609 * is specified in the capabilities register. 4610 */ 4611 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { 4612 mmc->max_blk_size = 2; 4613 } else { 4614 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >> 4615 SDHCI_MAX_BLOCK_SHIFT; 4616 if (mmc->max_blk_size >= 3) { 4617 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n", 4618 mmc_hostname(mmc)); 4619 mmc->max_blk_size = 0; 4620 } 4621 } 4622 4623 mmc->max_blk_size = 512 << mmc->max_blk_size; 4624 4625 /* 4626 * Maximum block count. 4627 */ 4628 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; 4629 4630 if (mmc->max_segs == 1) 4631 /* This may alter mmc->*_blk_* parameters */ 4632 sdhci_allocate_bounce_buffer(host); 4633 4634 return 0; 4635 4636 unreg: 4637 if (host->sdhci_core_to_disable_vqmmc) 4638 regulator_disable(mmc->supply.vqmmc); 4639 undma: 4640 if (host->align_buffer) 4641 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4642 host->adma_table_sz, host->align_buffer, 4643 host->align_addr); 4644 host->adma_table = NULL; 4645 host->align_buffer = NULL; 4646 4647 return ret; 4648 } 4649 EXPORT_SYMBOL_GPL(sdhci_setup_host); 4650 4651 void sdhci_cleanup_host(struct sdhci_host *host) 4652 { 4653 struct mmc_host *mmc = host->mmc; 4654 4655 if (host->sdhci_core_to_disable_vqmmc) 4656 regulator_disable(mmc->supply.vqmmc); 4657 4658 if (host->align_buffer) 4659 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4660 host->adma_table_sz, host->align_buffer, 4661 host->align_addr); 4662 4663 if (host->use_external_dma) 4664 sdhci_external_dma_release(host); 4665 4666 host->adma_table = NULL; 4667 host->align_buffer = NULL; 4668 } 4669 EXPORT_SYMBOL_GPL(sdhci_cleanup_host); 4670 4671 int __sdhci_add_host(struct sdhci_host *host) 4672 { 4673 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI; 4674 struct mmc_host *mmc = host->mmc; 4675 int ret; 4676 4677 if ((mmc->caps2 & MMC_CAP2_CQE) && 4678 (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) { 4679 mmc->caps2 &= ~MMC_CAP2_CQE; 4680 mmc->cqe_ops = NULL; 4681 } 4682 4683 host->complete_wq = alloc_workqueue("sdhci", flags, 0); 4684 if (!host->complete_wq) 4685 return -ENOMEM; 4686 4687 INIT_WORK(&host->complete_work, sdhci_complete_work); 4688 4689 timer_setup(&host->timer, sdhci_timeout_timer, 0); 4690 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0); 4691 4692 init_waitqueue_head(&host->buf_ready_int); 4693 4694 sdhci_init(host, 0); 4695 4696 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, 4697 IRQF_SHARED, mmc_hostname(mmc), host); 4698 if (ret) { 4699 pr_err("%s: Failed to request IRQ %d: %d\n", 4700 mmc_hostname(mmc), host->irq, ret); 4701 goto unwq; 4702 } 4703 4704 ret = sdhci_led_register(host); 4705 if (ret) { 4706 pr_err("%s: Failed to register LED device: %d\n", 4707 mmc_hostname(mmc), ret); 4708 goto unirq; 4709 } 4710 4711 ret = mmc_add_host(mmc); 4712 if (ret) 4713 goto unled; 4714 4715 pr_info("%s: SDHCI controller on %s [%s] using %s\n", 4716 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), 4717 host->use_external_dma ? "External DMA" : 4718 (host->flags & SDHCI_USE_ADMA) ? 4719 (host->flags & SDHCI_USE_64_BIT_DMA) ? 
"ADMA 64-bit" : "ADMA" : 4720 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"); 4721 4722 sdhci_enable_card_detection(host); 4723 4724 return 0; 4725 4726 unled: 4727 sdhci_led_unregister(host); 4728 unirq: 4729 sdhci_do_reset(host, SDHCI_RESET_ALL); 4730 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4731 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4732 free_irq(host->irq, host); 4733 unwq: 4734 destroy_workqueue(host->complete_wq); 4735 4736 return ret; 4737 } 4738 EXPORT_SYMBOL_GPL(__sdhci_add_host); 4739 4740 int sdhci_add_host(struct sdhci_host *host) 4741 { 4742 int ret; 4743 4744 ret = sdhci_setup_host(host); 4745 if (ret) 4746 return ret; 4747 4748 ret = __sdhci_add_host(host); 4749 if (ret) 4750 goto cleanup; 4751 4752 return 0; 4753 4754 cleanup: 4755 sdhci_cleanup_host(host); 4756 4757 return ret; 4758 } 4759 EXPORT_SYMBOL_GPL(sdhci_add_host); 4760 4761 void sdhci_remove_host(struct sdhci_host *host, int dead) 4762 { 4763 struct mmc_host *mmc = host->mmc; 4764 unsigned long flags; 4765 4766 if (dead) { 4767 spin_lock_irqsave(&host->lock, flags); 4768 4769 host->flags |= SDHCI_DEVICE_DEAD; 4770 4771 if (sdhci_has_requests(host)) { 4772 pr_err("%s: Controller removed during " 4773 " transfer!\n", mmc_hostname(mmc)); 4774 sdhci_error_out_mrqs(host, -ENOMEDIUM); 4775 } 4776 4777 spin_unlock_irqrestore(&host->lock, flags); 4778 } 4779 4780 sdhci_disable_card_detection(host); 4781 4782 mmc_remove_host(mmc); 4783 4784 sdhci_led_unregister(host); 4785 4786 if (!dead) 4787 sdhci_do_reset(host, SDHCI_RESET_ALL); 4788 4789 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4790 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4791 free_irq(host->irq, host); 4792 4793 del_timer_sync(&host->timer); 4794 del_timer_sync(&host->data_timer); 4795 4796 destroy_workqueue(host->complete_wq); 4797 4798 if (host->sdhci_core_to_disable_vqmmc) 4799 regulator_disable(mmc->supply.vqmmc); 4800 4801 if (host->align_buffer) 4802 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4803 host->adma_table_sz, host->align_buffer, 4804 host->align_addr); 4805 4806 if (host->use_external_dma) 4807 sdhci_external_dma_release(host); 4808 4809 host->adma_table = NULL; 4810 host->align_buffer = NULL; 4811 } 4812 4813 EXPORT_SYMBOL_GPL(sdhci_remove_host); 4814 4815 void sdhci_free_host(struct sdhci_host *host) 4816 { 4817 mmc_free_host(host->mmc); 4818 } 4819 4820 EXPORT_SYMBOL_GPL(sdhci_free_host); 4821 4822 /*****************************************************************************\ 4823 * * 4824 * Driver init/exit * 4825 * * 4826 \*****************************************************************************/ 4827 4828 static int __init sdhci_drv_init(void) 4829 { 4830 pr_info(DRIVER_NAME 4831 ": Secure Digital Host Controller Interface driver\n"); 4832 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 4833 4834 return 0; 4835 } 4836 4837 static void __exit sdhci_drv_exit(void) 4838 { 4839 } 4840 4841 module_init(sdhci_drv_init); 4842 module_exit(sdhci_drv_exit); 4843 4844 module_param(debug_quirks, uint, 0444); 4845 module_param(debug_quirks2, uint, 0444); 4846 4847 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 4848 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 4849 MODULE_LICENSE("GPL"); 4850 4851 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 4852 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 4853