1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver 4 * 5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 6 * 7 * Thanks to the following companies for their support: 8 * 9 * - JMicron (hardware and technical support) 10 */ 11 12 #include <linux/bitfield.h> 13 #include <linux/delay.h> 14 #include <linux/dmaengine.h> 15 #include <linux/ktime.h> 16 #include <linux/highmem.h> 17 #include <linux/io.h> 18 #include <linux/module.h> 19 #include <linux/dma-mapping.h> 20 #include <linux/slab.h> 21 #include <linux/scatterlist.h> 22 #include <linux/sizes.h> 23 #include <linux/regulator/consumer.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/of.h> 26 27 #include <linux/leds.h> 28 29 #include <linux/mmc/mmc.h> 30 #include <linux/mmc/host.h> 31 #include <linux/mmc/card.h> 32 #include <linux/mmc/sdio.h> 33 #include <linux/mmc/slot-gpio.h> 34 35 #include "sdhci.h" 36 37 #define DRIVER_NAME "sdhci" 38 39 #define DBG(f, x...) \ 40 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 41 42 #define SDHCI_DUMP(f, x...) \ 43 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 44 45 #define MAX_TUNING_LOOP 40 46 47 static unsigned int debug_quirks = 0; 48 static unsigned int debug_quirks2; 49 50 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); 51 52 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd); 53 54 void sdhci_dumpregs(struct sdhci_host *host) 55 { 56 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n"); 57 58 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n", 59 sdhci_readl(host, SDHCI_DMA_ADDRESS), 60 sdhci_readw(host, SDHCI_HOST_VERSION)); 61 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n", 62 sdhci_readw(host, SDHCI_BLOCK_SIZE), 63 sdhci_readw(host, SDHCI_BLOCK_COUNT)); 64 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n", 65 sdhci_readl(host, SDHCI_ARGUMENT), 66 sdhci_readw(host, SDHCI_TRANSFER_MODE)); 67 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n", 68 sdhci_readl(host, SDHCI_PRESENT_STATE), 69 sdhci_readb(host, SDHCI_HOST_CONTROL)); 70 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n", 71 sdhci_readb(host, SDHCI_POWER_CONTROL), 72 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); 73 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n", 74 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), 75 sdhci_readw(host, SDHCI_CLOCK_CONTROL)); 76 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n", 77 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), 78 sdhci_readl(host, SDHCI_INT_STATUS)); 79 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n", 80 sdhci_readl(host, SDHCI_INT_ENABLE), 81 sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); 82 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n", 83 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS), 84 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 85 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n", 86 sdhci_readl(host, SDHCI_CAPABILITIES), 87 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 88 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n", 89 sdhci_readw(host, SDHCI_COMMAND), 90 sdhci_readl(host, SDHCI_MAX_CURRENT)); 91 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n", 92 sdhci_readl(host, SDHCI_RESPONSE), 93 sdhci_readl(host, SDHCI_RESPONSE + 4)); 94 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n", 95 sdhci_readl(host, SDHCI_RESPONSE + 8), 96 sdhci_readl(host, SDHCI_RESPONSE + 12)); 97 SDHCI_DUMP("Host ctl2: 0x%08x\n", 98 sdhci_readw(host, SDHCI_HOST_CONTROL2)); 99 100 if (host->flags & SDHCI_USE_ADMA) { 101 if 
(host->flags & SDHCI_USE_64_BIT_DMA) { 102 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n", 103 sdhci_readl(host, SDHCI_ADMA_ERROR), 104 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI), 105 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 106 } else { 107 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 108 sdhci_readl(host, SDHCI_ADMA_ERROR), 109 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 110 } 111 } 112 113 if (host->ops->dump_vendor_regs) 114 host->ops->dump_vendor_regs(host); 115 116 SDHCI_DUMP("============================================\n"); 117 } 118 EXPORT_SYMBOL_GPL(sdhci_dumpregs); 119 120 /*****************************************************************************\ 121 * * 122 * Low level functions * 123 * * 124 \*****************************************************************************/ 125 126 static void sdhci_do_enable_v4_mode(struct sdhci_host *host) 127 { 128 u16 ctrl2; 129 130 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 131 if (ctrl2 & SDHCI_CTRL_V4_MODE) 132 return; 133 134 ctrl2 |= SDHCI_CTRL_V4_MODE; 135 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 136 } 137 138 /* 139 * This can be called before sdhci_add_host() by Vendor's host controller 140 * driver to enable v4 mode if supported. 141 */ 142 void sdhci_enable_v4_mode(struct sdhci_host *host) 143 { 144 host->v4_mode = true; 145 sdhci_do_enable_v4_mode(host); 146 } 147 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode); 148 149 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd) 150 { 151 return cmd->data || cmd->flags & MMC_RSP_BUSY; 152 } 153 154 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) 155 { 156 u32 present; 157 158 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 159 !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc)) 160 return; 161 162 if (enable) { 163 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 164 SDHCI_CARD_PRESENT; 165 166 host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : 167 SDHCI_INT_CARD_INSERT; 168 } else { 169 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 170 } 171 172 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 173 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 174 } 175 176 static void sdhci_enable_card_detection(struct sdhci_host *host) 177 { 178 sdhci_set_card_detection(host, true); 179 } 180 181 static void sdhci_disable_card_detection(struct sdhci_host *host) 182 { 183 sdhci_set_card_detection(host, false); 184 } 185 186 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) 187 { 188 if (host->bus_on) 189 return; 190 host->bus_on = true; 191 pm_runtime_get_noresume(mmc_dev(host->mmc)); 192 } 193 194 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) 195 { 196 if (!host->bus_on) 197 return; 198 host->bus_on = false; 199 pm_runtime_put_noidle(mmc_dev(host->mmc)); 200 } 201 202 void sdhci_reset(struct sdhci_host *host, u8 mask) 203 { 204 ktime_t timeout; 205 206 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 207 208 if (mask & SDHCI_RESET_ALL) { 209 host->clock = 0; 210 /* Reset-all turns off SD Bus Power */ 211 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 212 sdhci_runtime_pm_bus_off(host); 213 } 214 215 /* Wait max 100 ms */ 216 timeout = ktime_add_ms(ktime_get(), 100); 217 218 /* hw clears the bit when it's done */ 219 while (1) { 220 bool timedout = ktime_after(ktime_get(), timeout); 221 222 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) 223 break; 224 if (timedout) { 225 pr_err("%s: Reset 0x%x never completed.\n", 226 mmc_hostname(host->mmc), (int)mask); 227 sdhci_dumpregs(host); 228 return; 229 } 230 udelay(10); 231 } 232 } 233 EXPORT_SYMBOL_GPL(sdhci_reset); 234 235 static void sdhci_do_reset(struct sdhci_host *host, u8 mask) 236 { 237 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 238 struct mmc_host *mmc = host->mmc; 239 240 if (!mmc->ops->get_cd(mmc)) 241 return; 242 } 243 244 host->ops->reset(host, mask); 245 246 if (mask & SDHCI_RESET_ALL) { 247 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 248 if (host->ops->enable_dma) 249 host->ops->enable_dma(host); 250 } 251 252 /* Resetting the controller clears many */ 253 host->preset_enabled = false; 254 } 255 } 256 257 static void sdhci_set_default_irqs(struct sdhci_host *host) 258 { 259 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 260 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | 261 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | 262 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | 263 SDHCI_INT_RESPONSE; 264 265 if (host->tuning_mode == SDHCI_TUNING_MODE_2 || 266 host->tuning_mode == SDHCI_TUNING_MODE_3) 267 host->ier |= SDHCI_INT_RETUNE; 268 269 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 270 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 271 } 272 273 static void sdhci_config_dma(struct sdhci_host *host) 274 { 275 u8 ctrl; 276 u16 ctrl2; 277 278 if (host->version < SDHCI_SPEC_200) 279 return; 280 281 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 282 283 /* 284 * Always adjust the DMA selection as some controllers 285 * (e.g. JMicron) can't do PIO properly when the selection 286 * is ADMA. 
287 */ 288 ctrl &= ~SDHCI_CTRL_DMA_MASK; 289 if (!(host->flags & SDHCI_REQ_USE_DMA)) 290 goto out; 291 292 /* Note if DMA Select is zero then SDMA is selected */ 293 if (host->flags & SDHCI_USE_ADMA) 294 ctrl |= SDHCI_CTRL_ADMA32; 295 296 if (host->flags & SDHCI_USE_64_BIT_DMA) { 297 /* 298 * If v4 mode, all supported DMA can be 64-bit addressing if 299 * controller supports 64-bit system address, otherwise only 300 * ADMA can support 64-bit addressing. 301 */ 302 if (host->v4_mode) { 303 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 304 ctrl2 |= SDHCI_CTRL_64BIT_ADDR; 305 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 306 } else if (host->flags & SDHCI_USE_ADMA) { 307 /* 308 * Don't need to undo SDHCI_CTRL_ADMA32 in order to 309 * set SDHCI_CTRL_ADMA64. 310 */ 311 ctrl |= SDHCI_CTRL_ADMA64; 312 } 313 } 314 315 out: 316 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 317 } 318 319 static void sdhci_init(struct sdhci_host *host, int soft) 320 { 321 struct mmc_host *mmc = host->mmc; 322 unsigned long flags; 323 324 if (soft) 325 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 326 else 327 sdhci_do_reset(host, SDHCI_RESET_ALL); 328 329 if (host->v4_mode) 330 sdhci_do_enable_v4_mode(host); 331 332 spin_lock_irqsave(&host->lock, flags); 333 sdhci_set_default_irqs(host); 334 spin_unlock_irqrestore(&host->lock, flags); 335 336 host->cqe_on = false; 337 338 if (soft) { 339 /* force clock reconfiguration */ 340 host->clock = 0; 341 mmc->ops->set_ios(mmc, &mmc->ios); 342 } 343 } 344 345 static void sdhci_reinit(struct sdhci_host *host) 346 { 347 u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 348 349 sdhci_init(host, 0); 350 sdhci_enable_card_detection(host); 351 352 /* 353 * A change to the card detect bits indicates a change in present state, 354 * refer sdhci_set_card_detection(). A card detect interrupt might have 355 * been missed while the host controller was being reset, so trigger a 356 * rescan to check. 
357 */ 358 if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT))) 359 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 360 } 361 362 static void __sdhci_led_activate(struct sdhci_host *host) 363 { 364 u8 ctrl; 365 366 if (host->quirks & SDHCI_QUIRK_NO_LED) 367 return; 368 369 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 370 ctrl |= SDHCI_CTRL_LED; 371 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 372 } 373 374 static void __sdhci_led_deactivate(struct sdhci_host *host) 375 { 376 u8 ctrl; 377 378 if (host->quirks & SDHCI_QUIRK_NO_LED) 379 return; 380 381 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 382 ctrl &= ~SDHCI_CTRL_LED; 383 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 384 } 385 386 #if IS_REACHABLE(CONFIG_LEDS_CLASS) 387 static void sdhci_led_control(struct led_classdev *led, 388 enum led_brightness brightness) 389 { 390 struct sdhci_host *host = container_of(led, struct sdhci_host, led); 391 unsigned long flags; 392 393 spin_lock_irqsave(&host->lock, flags); 394 395 if (host->runtime_suspended) 396 goto out; 397 398 if (brightness == LED_OFF) 399 __sdhci_led_deactivate(host); 400 else 401 __sdhci_led_activate(host); 402 out: 403 spin_unlock_irqrestore(&host->lock, flags); 404 } 405 406 static int sdhci_led_register(struct sdhci_host *host) 407 { 408 struct mmc_host *mmc = host->mmc; 409 410 if (host->quirks & SDHCI_QUIRK_NO_LED) 411 return 0; 412 413 snprintf(host->led_name, sizeof(host->led_name), 414 "%s::", mmc_hostname(mmc)); 415 416 host->led.name = host->led_name; 417 host->led.brightness = LED_OFF; 418 host->led.default_trigger = mmc_hostname(mmc); 419 host->led.brightness_set = sdhci_led_control; 420 421 return led_classdev_register(mmc_dev(mmc), &host->led); 422 } 423 424 static void sdhci_led_unregister(struct sdhci_host *host) 425 { 426 if (host->quirks & SDHCI_QUIRK_NO_LED) 427 return; 428 429 led_classdev_unregister(&host->led); 430 } 431 432 static inline void sdhci_led_activate(struct sdhci_host *host) 433 { 434 } 435 436 static inline void sdhci_led_deactivate(struct sdhci_host *host) 437 { 438 } 439 440 #else 441 442 static inline int sdhci_led_register(struct sdhci_host *host) 443 { 444 return 0; 445 } 446 447 static inline void sdhci_led_unregister(struct sdhci_host *host) 448 { 449 } 450 451 static inline void sdhci_led_activate(struct sdhci_host *host) 452 { 453 __sdhci_led_activate(host); 454 } 455 456 static inline void sdhci_led_deactivate(struct sdhci_host *host) 457 { 458 __sdhci_led_deactivate(host); 459 } 460 461 #endif 462 463 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq, 464 unsigned long timeout) 465 { 466 if (sdhci_data_line_cmd(mrq->cmd)) 467 mod_timer(&host->data_timer, timeout); 468 else 469 mod_timer(&host->timer, timeout); 470 } 471 472 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq) 473 { 474 if (sdhci_data_line_cmd(mrq->cmd)) 475 del_timer(&host->data_timer); 476 else 477 del_timer(&host->timer); 478 } 479 480 static inline bool sdhci_has_requests(struct sdhci_host *host) 481 { 482 return host->cmd || host->data_cmd; 483 } 484 485 /*****************************************************************************\ 486 * * 487 * Core functions * 488 * * 489 \*****************************************************************************/ 490 491 static void sdhci_read_block_pio(struct sdhci_host *host) 492 { 493 unsigned long flags; 494 size_t blksize, len, chunk; 495 u32 scratch; 496 u8 *buf; 497 498 DBG("PIO reading\n"); 499 500 blksize = host->data->blksz; 501 
chunk = 0; 502 503 local_irq_save(flags); 504 505 while (blksize) { 506 BUG_ON(!sg_miter_next(&host->sg_miter)); 507 508 len = min(host->sg_miter.length, blksize); 509 510 blksize -= len; 511 host->sg_miter.consumed = len; 512 513 buf = host->sg_miter.addr; 514 515 while (len) { 516 if (chunk == 0) { 517 scratch = sdhci_readl(host, SDHCI_BUFFER); 518 chunk = 4; 519 } 520 521 *buf = scratch & 0xFF; 522 523 buf++; 524 scratch >>= 8; 525 chunk--; 526 len--; 527 } 528 } 529 530 sg_miter_stop(&host->sg_miter); 531 532 local_irq_restore(flags); 533 } 534 535 static void sdhci_write_block_pio(struct sdhci_host *host) 536 { 537 unsigned long flags; 538 size_t blksize, len, chunk; 539 u32 scratch; 540 u8 *buf; 541 542 DBG("PIO writing\n"); 543 544 blksize = host->data->blksz; 545 chunk = 0; 546 scratch = 0; 547 548 local_irq_save(flags); 549 550 while (blksize) { 551 BUG_ON(!sg_miter_next(&host->sg_miter)); 552 553 len = min(host->sg_miter.length, blksize); 554 555 blksize -= len; 556 host->sg_miter.consumed = len; 557 558 buf = host->sg_miter.addr; 559 560 while (len) { 561 scratch |= (u32)*buf << (chunk * 8); 562 563 buf++; 564 chunk++; 565 len--; 566 567 if ((chunk == 4) || ((len == 0) && (blksize == 0))) { 568 sdhci_writel(host, scratch, SDHCI_BUFFER); 569 chunk = 0; 570 scratch = 0; 571 } 572 } 573 } 574 575 sg_miter_stop(&host->sg_miter); 576 577 local_irq_restore(flags); 578 } 579 580 static void sdhci_transfer_pio(struct sdhci_host *host) 581 { 582 u32 mask; 583 584 if (host->blocks == 0) 585 return; 586 587 if (host->data->flags & MMC_DATA_READ) 588 mask = SDHCI_DATA_AVAILABLE; 589 else 590 mask = SDHCI_SPACE_AVAILABLE; 591 592 /* 593 * Some controllers (JMicron JMB38x) mess up the buffer bits 594 * for transfers < 4 bytes. As long as it is just one block, 595 * we can ignore the bits. 596 */ 597 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && 598 (host->data->blocks == 1)) 599 mask = ~0; 600 601 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 602 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) 603 udelay(100); 604 605 if (host->data->flags & MMC_DATA_READ) 606 sdhci_read_block_pio(host); 607 else 608 sdhci_write_block_pio(host); 609 610 host->blocks--; 611 if (host->blocks == 0) 612 break; 613 } 614 615 DBG("PIO transfer complete.\n"); 616 } 617 618 static int sdhci_pre_dma_transfer(struct sdhci_host *host, 619 struct mmc_data *data, int cookie) 620 { 621 int sg_count; 622 623 /* 624 * If the data buffers are already mapped, return the previous 625 * dma_map_sg() result. 
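	 * (COOKIE_PRE_MAPPED is set by sdhci_pre_req(), the ->pre_req() hook,
	 * which maps the buffers ahead of the actual request.)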
626 */ 627 if (data->host_cookie == COOKIE_PRE_MAPPED) 628 return data->sg_count; 629 630 /* Bounce write requests to the bounce buffer */ 631 if (host->bounce_buffer) { 632 unsigned int length = data->blksz * data->blocks; 633 634 if (length > host->bounce_buffer_size) { 635 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n", 636 mmc_hostname(host->mmc), length, 637 host->bounce_buffer_size); 638 return -EIO; 639 } 640 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { 641 /* Copy the data to the bounce buffer */ 642 if (host->ops->copy_to_bounce_buffer) { 643 host->ops->copy_to_bounce_buffer(host, 644 data, length); 645 } else { 646 sg_copy_to_buffer(data->sg, data->sg_len, 647 host->bounce_buffer, length); 648 } 649 } 650 /* Switch ownership to the DMA */ 651 dma_sync_single_for_device(mmc_dev(host->mmc), 652 host->bounce_addr, 653 host->bounce_buffer_size, 654 mmc_get_dma_dir(data)); 655 /* Just a dummy value */ 656 sg_count = 1; 657 } else { 658 /* Just access the data directly from memory */ 659 sg_count = dma_map_sg(mmc_dev(host->mmc), 660 data->sg, data->sg_len, 661 mmc_get_dma_dir(data)); 662 } 663 664 if (sg_count == 0) 665 return -ENOSPC; 666 667 data->sg_count = sg_count; 668 data->host_cookie = cookie; 669 670 return sg_count; 671 } 672 673 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) 674 { 675 local_irq_save(*flags); 676 return kmap_atomic(sg_page(sg)) + sg->offset; 677 } 678 679 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) 680 { 681 kunmap_atomic(buffer); 682 local_irq_restore(*flags); 683 } 684 685 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, 686 dma_addr_t addr, int len, unsigned int cmd) 687 { 688 struct sdhci_adma2_64_desc *dma_desc = *desc; 689 690 /* 32-bit and 64-bit descriptors have these members in same position */ 691 dma_desc->cmd = cpu_to_le16(cmd); 692 dma_desc->len = cpu_to_le16(len); 693 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr)); 694 695 if (host->flags & SDHCI_USE_64_BIT_DMA) 696 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr)); 697 698 *desc += host->desc_sz; 699 } 700 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc); 701 702 static inline void __sdhci_adma_write_desc(struct sdhci_host *host, 703 void **desc, dma_addr_t addr, 704 int len, unsigned int cmd) 705 { 706 if (host->ops->adma_write_desc) 707 host->ops->adma_write_desc(host, desc, addr, len, cmd); 708 else 709 sdhci_adma_write_desc(host, desc, addr, len, cmd); 710 } 711 712 static void sdhci_adma_mark_end(void *desc) 713 { 714 struct sdhci_adma2_64_desc *dma_desc = desc; 715 716 /* 32-bit and 64-bit descriptors have 'cmd' in same position */ 717 dma_desc->cmd |= cpu_to_le16(ADMA2_END); 718 } 719 720 static void sdhci_adma_table_pre(struct sdhci_host *host, 721 struct mmc_data *data, int sg_count) 722 { 723 struct scatterlist *sg; 724 unsigned long flags; 725 dma_addr_t addr, align_addr; 726 void *desc, *align; 727 char *buffer; 728 int len, offset, i; 729 730 /* 731 * The spec does not specify endianness of descriptor table. 732 * We currently guess that it is LE. 733 */ 734 735 host->sg_count = sg_count; 736 737 desc = host->adma_table; 738 align = host->align_buffer; 739 740 align_addr = host->align_addr; 741 742 for_each_sg(data->sg, sg, host->sg_count, i) { 743 addr = sg_dma_address(sg); 744 len = sg_dma_len(sg); 745 746 /* 747 * The SDHCI specification states that ADMA addresses must 748 * be 32-bit aligned. 
If they aren't, then we use a bounce 749 * buffer for the (up to three) bytes that screw up the 750 * alignment. 751 */ 752 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) & 753 SDHCI_ADMA2_MASK; 754 if (offset) { 755 if (data->flags & MMC_DATA_WRITE) { 756 buffer = sdhci_kmap_atomic(sg, &flags); 757 memcpy(align, buffer, offset); 758 sdhci_kunmap_atomic(buffer, &flags); 759 } 760 761 /* tran, valid */ 762 __sdhci_adma_write_desc(host, &desc, align_addr, 763 offset, ADMA2_TRAN_VALID); 764 765 BUG_ON(offset > 65536); 766 767 align += SDHCI_ADMA2_ALIGN; 768 align_addr += SDHCI_ADMA2_ALIGN; 769 770 addr += offset; 771 len -= offset; 772 } 773 774 BUG_ON(len > 65536); 775 776 /* tran, valid */ 777 if (len) 778 __sdhci_adma_write_desc(host, &desc, addr, len, 779 ADMA2_TRAN_VALID); 780 781 /* 782 * If this triggers then we have a calculation bug 783 * somewhere. :/ 784 */ 785 WARN_ON((desc - host->adma_table) >= host->adma_table_sz); 786 } 787 788 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { 789 /* Mark the last descriptor as the terminating descriptor */ 790 if (desc != host->adma_table) { 791 desc -= host->desc_sz; 792 sdhci_adma_mark_end(desc); 793 } 794 } else { 795 /* Add a terminating entry - nop, end, valid */ 796 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID); 797 } 798 } 799 800 static void sdhci_adma_table_post(struct sdhci_host *host, 801 struct mmc_data *data) 802 { 803 struct scatterlist *sg; 804 int i, size; 805 void *align; 806 char *buffer; 807 unsigned long flags; 808 809 if (data->flags & MMC_DATA_READ) { 810 bool has_unaligned = false; 811 812 /* Do a quick scan of the SG list for any unaligned mappings */ 813 for_each_sg(data->sg, sg, host->sg_count, i) 814 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 815 has_unaligned = true; 816 break; 817 } 818 819 if (has_unaligned) { 820 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, 821 data->sg_len, DMA_FROM_DEVICE); 822 823 align = host->align_buffer; 824 825 for_each_sg(data->sg, sg, host->sg_count, i) { 826 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 827 size = SDHCI_ADMA2_ALIGN - 828 (sg_dma_address(sg) & SDHCI_ADMA2_MASK); 829 830 buffer = sdhci_kmap_atomic(sg, &flags); 831 memcpy(buffer, align, size); 832 sdhci_kunmap_atomic(buffer, &flags); 833 834 align += SDHCI_ADMA2_ALIGN; 835 } 836 } 837 } 838 } 839 } 840 841 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr) 842 { 843 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS); 844 if (host->flags & SDHCI_USE_64_BIT_DMA) 845 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI); 846 } 847 848 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host) 849 { 850 if (host->bounce_buffer) 851 return host->bounce_addr; 852 else 853 return sg_dma_address(host->data->sg); 854 } 855 856 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr) 857 { 858 if (host->v4_mode) 859 sdhci_set_adma_addr(host, addr); 860 else 861 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS); 862 } 863 864 static unsigned int sdhci_target_timeout(struct sdhci_host *host, 865 struct mmc_command *cmd, 866 struct mmc_data *data) 867 { 868 unsigned int target_timeout; 869 870 /* timeout in us */ 871 if (!data) { 872 target_timeout = cmd->busy_timeout * 1000; 873 } else { 874 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000); 875 if (host->clock && data->timeout_clks) { 876 unsigned long long val; 877 878 /* 879 * data->timeout_clks is in units of clock cycles. 880 * host->clock is in Hz. target_timeout is in us. 
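			 * Cycles divided by Hz gives seconds, and one second
			 * is 1000000 us.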
881 * Hence, us = 1000000 * cycles / Hz. Round up. 882 */ 883 val = 1000000ULL * data->timeout_clks; 884 if (do_div(val, host->clock)) 885 target_timeout++; 886 target_timeout += val; 887 } 888 } 889 890 return target_timeout; 891 } 892 893 static void sdhci_calc_sw_timeout(struct sdhci_host *host, 894 struct mmc_command *cmd) 895 { 896 struct mmc_data *data = cmd->data; 897 struct mmc_host *mmc = host->mmc; 898 struct mmc_ios *ios = &mmc->ios; 899 unsigned char bus_width = 1 << ios->bus_width; 900 unsigned int blksz; 901 unsigned int freq; 902 u64 target_timeout; 903 u64 transfer_time; 904 905 target_timeout = sdhci_target_timeout(host, cmd, data); 906 target_timeout *= NSEC_PER_USEC; 907 908 if (data) { 909 blksz = data->blksz; 910 freq = mmc->actual_clock ? : host->clock; 911 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width); 912 do_div(transfer_time, freq); 913 /* multiply by '2' to account for any unknowns */ 914 transfer_time = transfer_time * 2; 915 /* calculate timeout for the entire data */ 916 host->data_timeout = data->blocks * target_timeout + 917 transfer_time; 918 } else { 919 host->data_timeout = target_timeout; 920 } 921 922 if (host->data_timeout) 923 host->data_timeout += MMC_CMD_TRANSFER_TIME; 924 } 925 926 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd, 927 bool *too_big) 928 { 929 u8 count; 930 struct mmc_data *data; 931 unsigned target_timeout, current_timeout; 932 933 *too_big = false; 934 935 /* 936 * If the host controller provides us with an incorrect timeout 937 * value, just skip the check and use the maximum. The hardware may take 938 * longer to time out, but that's much better than having a too-short 939 * timeout value. 940 */ 941 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) 942 return host->max_timeout_count; 943 944 /* Unspecified command, assume max */ 945 if (cmd == NULL) 946 return host->max_timeout_count; 947 948 data = cmd->data; 949 /* Unspecified timeout, assume max */ 950 if (!data && !cmd->busy_timeout) 951 return host->max_timeout_count; 952 953 /* timeout in us */ 954 target_timeout = sdhci_target_timeout(host, cmd, data); 955 956 /* 957 * Figure out needed cycles. 958 * We do this in steps in order to fit inside a 32 bit int. 
959 * The first step is the minimum timeout, which will have a 960 * minimum resolution of 6 bits: 961 * (1) 2^13*1000 > 2^22, 962 * (2) host->timeout_clk < 2^16 963 * => 964 * (1) / (2) > 2^6 965 */ 966 count = 0; 967 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 968 while (current_timeout < target_timeout) { 969 count++; 970 current_timeout <<= 1; 971 if (count > host->max_timeout_count) { 972 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT)) 973 DBG("Too large timeout 0x%x requested for CMD%d!\n", 974 count, cmd->opcode); 975 count = host->max_timeout_count; 976 *too_big = true; 977 break; 978 } 979 } 980 981 return count; 982 } 983 984 static void sdhci_set_transfer_irqs(struct sdhci_host *host) 985 { 986 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; 987 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; 988 989 if (host->flags & SDHCI_REQ_USE_DMA) 990 host->ier = (host->ier & ~pio_irqs) | dma_irqs; 991 else 992 host->ier = (host->ier & ~dma_irqs) | pio_irqs; 993 994 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12)) 995 host->ier |= SDHCI_INT_AUTO_CMD_ERR; 996 else 997 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR; 998 999 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1000 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1001 } 1002 1003 void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) 1004 { 1005 if (enable) 1006 host->ier |= SDHCI_INT_DATA_TIMEOUT; 1007 else 1008 host->ier &= ~SDHCI_INT_DATA_TIMEOUT; 1009 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1010 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1011 } 1012 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); 1013 1014 void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1015 { 1016 bool too_big = false; 1017 u8 count = sdhci_calc_timeout(host, cmd, &too_big); 1018 1019 if (too_big && 1020 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { 1021 sdhci_calc_sw_timeout(host, cmd); 1022 sdhci_set_data_timeout_irq(host, false); 1023 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { 1024 sdhci_set_data_timeout_irq(host, true); 1025 } 1026 1027 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); 1028 } 1029 EXPORT_SYMBOL_GPL(__sdhci_set_timeout); 1030 1031 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1032 { 1033 if (host->ops->set_timeout) 1034 host->ops->set_timeout(host, cmd); 1035 else 1036 __sdhci_set_timeout(host, cmd); 1037 } 1038 1039 static void sdhci_initialize_data(struct sdhci_host *host, 1040 struct mmc_data *data) 1041 { 1042 WARN_ON(host->data); 1043 1044 /* Sanity checks */ 1045 BUG_ON(data->blksz * data->blocks > 524288); 1046 BUG_ON(data->blksz > host->mmc->max_blk_size); 1047 BUG_ON(data->blocks > 65535); 1048 1049 host->data = data; 1050 host->data_early = 0; 1051 host->data->bytes_xfered = 0; 1052 } 1053 1054 static inline void sdhci_set_block_info(struct sdhci_host *host, 1055 struct mmc_data *data) 1056 { 1057 /* Set the DMA boundary value and block size */ 1058 sdhci_writew(host, 1059 SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), 1060 SDHCI_BLOCK_SIZE); 1061 /* 1062 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count 1063 * can be supported, in that case 16-bit block count register must be 0. 
1064 */ 1065 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1066 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { 1067 if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) 1068 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); 1069 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); 1070 } else { 1071 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); 1072 } 1073 } 1074 1075 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) 1076 { 1077 struct mmc_data *data = cmd->data; 1078 1079 sdhci_initialize_data(host, data); 1080 1081 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 1082 struct scatterlist *sg; 1083 unsigned int length_mask, offset_mask; 1084 int i; 1085 1086 host->flags |= SDHCI_REQ_USE_DMA; 1087 1088 /* 1089 * FIXME: This doesn't account for merging when mapping the 1090 * scatterlist. 1091 * 1092 * The assumption here being that alignment and lengths are 1093 * the same after DMA mapping to device address space. 1094 */ 1095 length_mask = 0; 1096 offset_mask = 0; 1097 if (host->flags & SDHCI_USE_ADMA) { 1098 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) { 1099 length_mask = 3; 1100 /* 1101 * As we use up to 3 byte chunks to work 1102 * around alignment problems, we need to 1103 * check the offset as well. 1104 */ 1105 offset_mask = 3; 1106 } 1107 } else { 1108 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) 1109 length_mask = 3; 1110 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) 1111 offset_mask = 3; 1112 } 1113 1114 if (unlikely(length_mask | offset_mask)) { 1115 for_each_sg(data->sg, sg, data->sg_len, i) { 1116 if (sg->length & length_mask) { 1117 DBG("Reverting to PIO because of transfer size (%d)\n", 1118 sg->length); 1119 host->flags &= ~SDHCI_REQ_USE_DMA; 1120 break; 1121 } 1122 if (sg->offset & offset_mask) { 1123 DBG("Reverting to PIO because of bad alignment\n"); 1124 host->flags &= ~SDHCI_REQ_USE_DMA; 1125 break; 1126 } 1127 } 1128 } 1129 } 1130 1131 if (host->flags & SDHCI_REQ_USE_DMA) { 1132 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1133 1134 if (sg_cnt <= 0) { 1135 /* 1136 * This only happens when someone fed 1137 * us an invalid request. 
1138 */ 1139 WARN_ON(1); 1140 host->flags &= ~SDHCI_REQ_USE_DMA; 1141 } else if (host->flags & SDHCI_USE_ADMA) { 1142 sdhci_adma_table_pre(host, data, sg_cnt); 1143 sdhci_set_adma_addr(host, host->adma_addr); 1144 } else { 1145 WARN_ON(sg_cnt != 1); 1146 sdhci_set_sdma_addr(host, sdhci_sdma_address(host)); 1147 } 1148 } 1149 1150 sdhci_config_dma(host); 1151 1152 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 1153 int flags; 1154 1155 flags = SG_MITER_ATOMIC; 1156 if (host->data->flags & MMC_DATA_READ) 1157 flags |= SG_MITER_TO_SG; 1158 else 1159 flags |= SG_MITER_FROM_SG; 1160 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1161 host->blocks = data->blocks; 1162 } 1163 1164 sdhci_set_transfer_irqs(host); 1165 1166 sdhci_set_block_info(host, data); 1167 } 1168 1169 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) 1170 1171 static int sdhci_external_dma_init(struct sdhci_host *host) 1172 { 1173 int ret = 0; 1174 struct mmc_host *mmc = host->mmc; 1175 1176 host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx"); 1177 if (IS_ERR(host->tx_chan)) { 1178 ret = PTR_ERR(host->tx_chan); 1179 if (ret != -EPROBE_DEFER) 1180 pr_warn("Failed to request TX DMA channel.\n"); 1181 host->tx_chan = NULL; 1182 return ret; 1183 } 1184 1185 host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx"); 1186 if (IS_ERR(host->rx_chan)) { 1187 if (host->tx_chan) { 1188 dma_release_channel(host->tx_chan); 1189 host->tx_chan = NULL; 1190 } 1191 1192 ret = PTR_ERR(host->rx_chan); 1193 if (ret != -EPROBE_DEFER) 1194 pr_warn("Failed to request RX DMA channel.\n"); 1195 host->rx_chan = NULL; 1196 } 1197 1198 return ret; 1199 } 1200 1201 static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1202 struct mmc_data *data) 1203 { 1204 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; 1205 } 1206 1207 static int sdhci_external_dma_setup(struct sdhci_host *host, 1208 struct mmc_command *cmd) 1209 { 1210 int ret, i; 1211 enum dma_transfer_direction dir; 1212 struct dma_async_tx_descriptor *desc; 1213 struct mmc_data *data = cmd->data; 1214 struct dma_chan *chan; 1215 struct dma_slave_config cfg; 1216 dma_cookie_t cookie; 1217 int sg_cnt; 1218 1219 if (!host->mapbase) 1220 return -EINVAL; 1221 1222 memset(&cfg, 0, sizeof(cfg)); 1223 cfg.src_addr = host->mapbase + SDHCI_BUFFER; 1224 cfg.dst_addr = host->mapbase + SDHCI_BUFFER; 1225 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1226 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1227 cfg.src_maxburst = data->blksz / 4; 1228 cfg.dst_maxburst = data->blksz / 4; 1229 1230 /* Sanity check: all the SG entries must be aligned by block size. */ 1231 for (i = 0; i < data->sg_len; i++) { 1232 if ((data->sg + i)->length % data->blksz) 1233 return -EINVAL; 1234 } 1235 1236 chan = sdhci_external_dma_channel(host, data); 1237 1238 ret = dmaengine_slave_config(chan, &cfg); 1239 if (ret) 1240 return ret; 1241 1242 sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1243 if (sg_cnt <= 0) 1244 return -EINVAL; 1245 1246 dir = data->flags & MMC_DATA_WRITE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; 1247 desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir, 1248 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1249 if (!desc) 1250 return -EINVAL; 1251 1252 desc->callback = NULL; 1253 desc->callback_param = NULL; 1254 1255 cookie = dmaengine_submit(desc); 1256 if (dma_submit_error(cookie)) 1257 ret = cookie; 1258 1259 return ret; 1260 } 1261 1262 static void sdhci_external_dma_release(struct sdhci_host *host) 1263 { 1264 if (host->tx_chan) { 1265 dma_release_channel(host->tx_chan); 1266 host->tx_chan = NULL; 1267 } 1268 1269 if (host->rx_chan) { 1270 dma_release_channel(host->rx_chan); 1271 host->rx_chan = NULL; 1272 } 1273 1274 sdhci_switch_external_dma(host, false); 1275 } 1276 1277 static void __sdhci_external_dma_prepare_data(struct sdhci_host *host, 1278 struct mmc_command *cmd) 1279 { 1280 struct mmc_data *data = cmd->data; 1281 1282 sdhci_initialize_data(host, data); 1283 1284 host->flags |= SDHCI_REQ_USE_DMA; 1285 sdhci_set_transfer_irqs(host); 1286 1287 sdhci_set_block_info(host, data); 1288 } 1289 1290 static void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1291 struct mmc_command *cmd) 1292 { 1293 if (!sdhci_external_dma_setup(host, cmd)) { 1294 __sdhci_external_dma_prepare_data(host, cmd); 1295 } else { 1296 sdhci_external_dma_release(host); 1297 pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n", 1298 mmc_hostname(host->mmc)); 1299 sdhci_prepare_data(host, cmd); 1300 } 1301 } 1302 1303 static void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1304 struct mmc_command *cmd) 1305 { 1306 struct dma_chan *chan; 1307 1308 if (!cmd->data) 1309 return; 1310 1311 chan = sdhci_external_dma_channel(host, cmd->data); 1312 if (chan) 1313 dma_async_issue_pending(chan); 1314 } 1315 1316 #else 1317 1318 static inline int sdhci_external_dma_init(struct sdhci_host *host) 1319 { 1320 return -EOPNOTSUPP; 1321 } 1322 1323 static inline void sdhci_external_dma_release(struct sdhci_host *host) 1324 { 1325 } 1326 1327 static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1328 struct mmc_command *cmd) 1329 { 1330 /* This should never happen */ 1331 WARN_ON_ONCE(1); 1332 } 1333 1334 static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1335 struct mmc_command *cmd) 1336 { 1337 } 1338 1339 static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1340 struct mmc_data *data) 1341 { 1342 return NULL; 1343 } 1344 1345 #endif 1346 1347 void sdhci_switch_external_dma(struct sdhci_host *host, bool en) 1348 { 1349 host->use_external_dma = en; 1350 } 1351 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma); 1352 1353 static inline bool sdhci_auto_cmd12(struct sdhci_host *host, 1354 struct mmc_request *mrq) 1355 { 1356 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) && 1357 !mrq->cap_cmd_during_tfr; 1358 } 1359 1360 static inline bool sdhci_auto_cmd23(struct sdhci_host *host, 1361 struct mmc_request *mrq) 1362 { 1363 return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23); 1364 } 1365 1366 static inline bool sdhci_manual_cmd23(struct sdhci_host *host, 1367 struct mmc_request *mrq) 1368 { 1369 return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23); 1370 } 1371 1372 static inline void sdhci_auto_cmd_select(struct sdhci_host *host, 1373 struct mmc_command *cmd, 1374 u16 *mode) 1375 { 1376 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) && 1377 (cmd->opcode != SD_IO_RW_EXTENDED); 1378 bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq); 1379 u16 ctrl2; 
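	/*
	 * Note: use_cmd12 above deliberately excludes SDIO CMD53
	 * (SD_IO_RW_EXTENDED); SDIO data transfers are aborted with CMD52
	 * rather than terminated with CMD12, so Auto CMD12 does not apply.
	 */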
1380 1381 /* 1382 * In case of Version 4.10 or later, use of 'Auto CMD Auto 1383 * Select' is recommended rather than use of 'Auto CMD12 1384 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode 1385 * here because some controllers (e.g sdhci-of-dwmshc) expect it. 1386 */ 1387 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1388 (use_cmd12 || use_cmd23)) { 1389 *mode |= SDHCI_TRNS_AUTO_SEL; 1390 1391 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1392 if (use_cmd23) 1393 ctrl2 |= SDHCI_CMD23_ENABLE; 1394 else 1395 ctrl2 &= ~SDHCI_CMD23_ENABLE; 1396 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 1397 1398 return; 1399 } 1400 1401 /* 1402 * If we are sending CMD23, CMD12 never gets sent 1403 * on successful completion (so no Auto-CMD12). 1404 */ 1405 if (use_cmd12) 1406 *mode |= SDHCI_TRNS_AUTO_CMD12; 1407 else if (use_cmd23) 1408 *mode |= SDHCI_TRNS_AUTO_CMD23; 1409 } 1410 1411 static void sdhci_set_transfer_mode(struct sdhci_host *host, 1412 struct mmc_command *cmd) 1413 { 1414 u16 mode = 0; 1415 struct mmc_data *data = cmd->data; 1416 1417 if (data == NULL) { 1418 if (host->quirks2 & 1419 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) { 1420 /* must not clear SDHCI_TRANSFER_MODE when tuning */ 1421 if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) 1422 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE); 1423 } else { 1424 /* clear Auto CMD settings for no data CMDs */ 1425 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); 1426 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 | 1427 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE); 1428 } 1429 return; 1430 } 1431 1432 WARN_ON(!host->data); 1433 1434 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE)) 1435 mode = SDHCI_TRNS_BLK_CNT_EN; 1436 1437 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) { 1438 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI; 1439 sdhci_auto_cmd_select(host, cmd, &mode); 1440 if (sdhci_auto_cmd23(host, cmd->mrq)) 1441 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2); 1442 } 1443 1444 if (data->flags & MMC_DATA_READ) 1445 mode |= SDHCI_TRNS_READ; 1446 if (host->flags & SDHCI_REQ_USE_DMA) 1447 mode |= SDHCI_TRNS_DMA; 1448 1449 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); 1450 } 1451 1452 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) 1453 { 1454 return (!(host->flags & SDHCI_DEVICE_DEAD) && 1455 ((mrq->cmd && mrq->cmd->error) || 1456 (mrq->sbc && mrq->sbc->error) || 1457 (mrq->data && mrq->data->stop && mrq->data->stop->error) || 1458 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); 1459 } 1460 1461 static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq) 1462 { 1463 int i; 1464 1465 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1466 if (host->mrqs_done[i] == mrq) { 1467 WARN_ON(1); 1468 return; 1469 } 1470 } 1471 1472 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1473 if (!host->mrqs_done[i]) { 1474 host->mrqs_done[i] = mrq; 1475 break; 1476 } 1477 } 1478 1479 WARN_ON(i >= SDHCI_MAX_MRQS); 1480 } 1481 1482 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1483 { 1484 if (host->cmd && host->cmd->mrq == mrq) 1485 host->cmd = NULL; 1486 1487 if (host->data_cmd && host->data_cmd->mrq == mrq) 1488 host->data_cmd = NULL; 1489 1490 if (host->deferred_cmd && host->deferred_cmd->mrq == mrq) 1491 host->deferred_cmd = NULL; 1492 1493 if (host->data && host->data->mrq == mrq) 1494 host->data = NULL; 1495 1496 if (sdhci_needs_reset(host, mrq)) 1497 host->pending_reset = true; 1498 1499 sdhci_set_mrq_done(host, mrq); 1500 1501 
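	/*
	 * All host references to this mrq have been dropped above; cancel its
	 * timeout timer and, if no other request is in flight, turn the
	 * activity LED off.
	 */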
sdhci_del_timer(host, mrq); 1502 1503 if (!sdhci_has_requests(host)) 1504 sdhci_led_deactivate(host); 1505 } 1506 1507 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1508 { 1509 __sdhci_finish_mrq(host, mrq); 1510 1511 queue_work(host->complete_wq, &host->complete_work); 1512 } 1513 1514 static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout) 1515 { 1516 struct mmc_command *data_cmd = host->data_cmd; 1517 struct mmc_data *data = host->data; 1518 1519 host->data = NULL; 1520 host->data_cmd = NULL; 1521 1522 /* 1523 * The controller needs a reset of internal state machines upon error 1524 * conditions. 1525 */ 1526 if (data->error) { 1527 if (!host->cmd || host->cmd == data_cmd) 1528 sdhci_do_reset(host, SDHCI_RESET_CMD); 1529 sdhci_do_reset(host, SDHCI_RESET_DATA); 1530 } 1531 1532 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) == 1533 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) 1534 sdhci_adma_table_post(host, data); 1535 1536 /* 1537 * The specification states that the block count register must 1538 * be updated, but it does not specify at what point in the 1539 * data flow. That makes the register entirely useless to read 1540 * back so we have to assume that nothing made it to the card 1541 * in the event of an error. 1542 */ 1543 if (data->error) 1544 data->bytes_xfered = 0; 1545 else 1546 data->bytes_xfered = data->blksz * data->blocks; 1547 1548 /* 1549 * Need to send CMD12 if - 1550 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23) 1551 * b) error in multiblock transfer 1552 */ 1553 if (data->stop && 1554 ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) || 1555 data->error)) { 1556 /* 1557 * 'cap_cmd_during_tfr' request must not use the command line 1558 * after mmc_command_done() has been called. It is upper layer's 1559 * responsibility to send the stop command if required. 1560 */ 1561 if (data->mrq->cap_cmd_during_tfr) { 1562 __sdhci_finish_mrq(host, data->mrq); 1563 } else { 1564 /* Avoid triggering warning in sdhci_send_command() */ 1565 host->cmd = NULL; 1566 if (!sdhci_send_command(host, data->stop)) { 1567 if (sw_data_timeout) { 1568 /* 1569 * This is anyway a sw data timeout, so 1570 * give up now. 
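					 * (In the non-timeout case the stop
					 * command would be deferred and
					 * retried; after a software timeout
					 * there is no point in waiting.)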
1571 */ 1572 data->stop->error = -EIO; 1573 __sdhci_finish_mrq(host, data->mrq); 1574 } else { 1575 WARN_ON(host->deferred_cmd); 1576 host->deferred_cmd = data->stop; 1577 } 1578 } 1579 } 1580 } else { 1581 __sdhci_finish_mrq(host, data->mrq); 1582 } 1583 } 1584 1585 static void sdhci_finish_data(struct sdhci_host *host) 1586 { 1587 __sdhci_finish_data(host, false); 1588 } 1589 1590 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) 1591 { 1592 int flags; 1593 u32 mask; 1594 unsigned long timeout; 1595 1596 WARN_ON(host->cmd); 1597 1598 /* Initially, a command has no error */ 1599 cmd->error = 0; 1600 1601 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && 1602 cmd->opcode == MMC_STOP_TRANSMISSION) 1603 cmd->flags |= MMC_RSP_BUSY; 1604 1605 mask = SDHCI_CMD_INHIBIT; 1606 if (sdhci_data_line_cmd(cmd)) 1607 mask |= SDHCI_DATA_INHIBIT; 1608 1609 /* We shouldn't wait for data inihibit for stop commands, even 1610 though they might use busy signaling */ 1611 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop)) 1612 mask &= ~SDHCI_DATA_INHIBIT; 1613 1614 if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) 1615 return false; 1616 1617 host->cmd = cmd; 1618 host->data_timeout = 0; 1619 if (sdhci_data_line_cmd(cmd)) { 1620 WARN_ON(host->data_cmd); 1621 host->data_cmd = cmd; 1622 sdhci_set_timeout(host, cmd); 1623 } 1624 1625 if (cmd->data) { 1626 if (host->use_external_dma) 1627 sdhci_external_dma_prepare_data(host, cmd); 1628 else 1629 sdhci_prepare_data(host, cmd); 1630 } 1631 1632 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); 1633 1634 sdhci_set_transfer_mode(host, cmd); 1635 1636 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 1637 WARN_ONCE(1, "Unsupported response type!\n"); 1638 /* 1639 * This does not happen in practice because 136-bit response 1640 * commands never have busy waiting, so rather than complicate 1641 * the error path, just remove busy waiting and continue. 
1642 */ 1643 cmd->flags &= ~MMC_RSP_BUSY; 1644 } 1645 1646 if (!(cmd->flags & MMC_RSP_PRESENT)) 1647 flags = SDHCI_CMD_RESP_NONE; 1648 else if (cmd->flags & MMC_RSP_136) 1649 flags = SDHCI_CMD_RESP_LONG; 1650 else if (cmd->flags & MMC_RSP_BUSY) 1651 flags = SDHCI_CMD_RESP_SHORT_BUSY; 1652 else 1653 flags = SDHCI_CMD_RESP_SHORT; 1654 1655 if (cmd->flags & MMC_RSP_CRC) 1656 flags |= SDHCI_CMD_CRC; 1657 if (cmd->flags & MMC_RSP_OPCODE) 1658 flags |= SDHCI_CMD_INDEX; 1659 1660 /* CMD19 is special in that the Data Present Select should be set */ 1661 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK || 1662 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200) 1663 flags |= SDHCI_CMD_DATA; 1664 1665 timeout = jiffies; 1666 if (host->data_timeout) 1667 timeout += nsecs_to_jiffies(host->data_timeout); 1668 else if (!cmd->data && cmd->busy_timeout > 9000) 1669 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ; 1670 else 1671 timeout += 10 * HZ; 1672 sdhci_mod_timer(host, cmd->mrq, timeout); 1673 1674 if (host->use_external_dma) 1675 sdhci_external_dma_pre_transfer(host, cmd); 1676 1677 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); 1678 1679 return true; 1680 } 1681 1682 static bool sdhci_present_error(struct sdhci_host *host, 1683 struct mmc_command *cmd, bool present) 1684 { 1685 if (!present || host->flags & SDHCI_DEVICE_DEAD) { 1686 cmd->error = -ENOMEDIUM; 1687 return true; 1688 } 1689 1690 return false; 1691 } 1692 1693 static bool sdhci_send_command_retry(struct sdhci_host *host, 1694 struct mmc_command *cmd, 1695 unsigned long flags) 1696 __releases(host->lock) 1697 __acquires(host->lock) 1698 { 1699 struct mmc_command *deferred_cmd = host->deferred_cmd; 1700 int timeout = 10; /* Approx. 10 ms */ 1701 bool present; 1702 1703 while (!sdhci_send_command(host, cmd)) { 1704 if (!timeout--) { 1705 pr_err("%s: Controller never released inhibit bit(s).\n", 1706 mmc_hostname(host->mmc)); 1707 sdhci_dumpregs(host); 1708 cmd->error = -EIO; 1709 return false; 1710 } 1711 1712 spin_unlock_irqrestore(&host->lock, flags); 1713 1714 usleep_range(1000, 1250); 1715 1716 present = host->mmc->ops->get_cd(host->mmc); 1717 1718 spin_lock_irqsave(&host->lock, flags); 1719 1720 /* A deferred command might disappear, handle that */ 1721 if (cmd == deferred_cmd && cmd != host->deferred_cmd) 1722 return true; 1723 1724 if (sdhci_present_error(host, cmd, present)) 1725 return false; 1726 } 1727 1728 if (cmd == host->deferred_cmd) 1729 host->deferred_cmd = NULL; 1730 1731 return true; 1732 } 1733 1734 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd) 1735 { 1736 int i, reg; 1737 1738 for (i = 0; i < 4; i++) { 1739 reg = SDHCI_RESPONSE + (3 - i) * 4; 1740 cmd->resp[i] = sdhci_readl(host, reg); 1741 } 1742 1743 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC) 1744 return; 1745 1746 /* CRC is stripped so we need to do some shifting */ 1747 for (i = 0; i < 4; i++) { 1748 cmd->resp[i] <<= 8; 1749 if (i != 3) 1750 cmd->resp[i] |= cmd->resp[i + 1] >> 24; 1751 } 1752 } 1753 1754 static void sdhci_finish_command(struct sdhci_host *host) 1755 { 1756 struct mmc_command *cmd = host->cmd; 1757 1758 host->cmd = NULL; 1759 1760 if (cmd->flags & MMC_RSP_PRESENT) { 1761 if (cmd->flags & MMC_RSP_136) { 1762 sdhci_read_rsp_136(host, cmd); 1763 } else { 1764 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); 1765 } 1766 } 1767 1768 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd) 1769 mmc_command_done(host->mmc, cmd->mrq); 1770 1771 /* 1772 * The host can send and 
interrupt when the busy state has 1773 * ended, allowing us to wait without wasting CPU cycles. 1774 * The busy signal uses DAT0 so this is similar to waiting 1775 * for data to complete. 1776 * 1777 * Note: The 1.0 specification is a bit ambiguous about this 1778 * feature so there might be some problems with older 1779 * controllers. 1780 */ 1781 if (cmd->flags & MMC_RSP_BUSY) { 1782 if (cmd->data) { 1783 DBG("Cannot wait for busy signal when also doing a data transfer"); 1784 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) && 1785 cmd == host->data_cmd) { 1786 /* Command complete before busy is ended */ 1787 return; 1788 } 1789 } 1790 1791 /* Finished CMD23, now send actual command. */ 1792 if (cmd == cmd->mrq->sbc) { 1793 if (!sdhci_send_command(host, cmd->mrq->cmd)) { 1794 WARN_ON(host->deferred_cmd); 1795 host->deferred_cmd = cmd->mrq->cmd; 1796 } 1797 } else { 1798 1799 /* Processed actual command. */ 1800 if (host->data && host->data_early) 1801 sdhci_finish_data(host); 1802 1803 if (!cmd->data) 1804 __sdhci_finish_mrq(host, cmd->mrq); 1805 } 1806 } 1807 1808 static u16 sdhci_get_preset_value(struct sdhci_host *host) 1809 { 1810 u16 preset = 0; 1811 1812 switch (host->timing) { 1813 case MMC_TIMING_MMC_HS: 1814 case MMC_TIMING_SD_HS: 1815 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED); 1816 break; 1817 case MMC_TIMING_UHS_SDR12: 1818 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1819 break; 1820 case MMC_TIMING_UHS_SDR25: 1821 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); 1822 break; 1823 case MMC_TIMING_UHS_SDR50: 1824 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); 1825 break; 1826 case MMC_TIMING_UHS_SDR104: 1827 case MMC_TIMING_MMC_HS200: 1828 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); 1829 break; 1830 case MMC_TIMING_UHS_DDR50: 1831 case MMC_TIMING_MMC_DDR52: 1832 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); 1833 break; 1834 case MMC_TIMING_MMC_HS400: 1835 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400); 1836 break; 1837 default: 1838 pr_warn("%s: Invalid UHS-I mode selected\n", 1839 mmc_hostname(host->mmc)); 1840 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1841 break; 1842 } 1843 return preset; 1844 } 1845 1846 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock, 1847 unsigned int *actual_clock) 1848 { 1849 int div = 0; /* Initialized for compiler warning */ 1850 int real_div = div, clk_mul = 1; 1851 u16 clk = 0; 1852 bool switch_base_clk = false; 1853 1854 if (host->version >= SDHCI_SPEC_300) { 1855 if (host->preset_enabled) { 1856 u16 pre_val; 1857 1858 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1859 pre_val = sdhci_get_preset_value(host); 1860 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val); 1861 if (host->clk_mul && 1862 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) { 1863 clk = SDHCI_PROG_CLOCK_MODE; 1864 real_div = div + 1; 1865 clk_mul = host->clk_mul; 1866 } else { 1867 real_div = max_t(int, 1, div << 1); 1868 } 1869 goto clock_set; 1870 } 1871 1872 /* 1873 * Check if the Host Controller supports Programmable Clock 1874 * Mode. 1875 */ 1876 if (host->clk_mul) { 1877 for (div = 1; div <= 1024; div++) { 1878 if ((host->max_clk * host->clk_mul / div) 1879 <= clock) 1880 break; 1881 } 1882 if ((host->max_clk * host->clk_mul / div) <= clock) { 1883 /* 1884 * Set Programmable Clock Mode in the Clock 1885 * Control register. 
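				 * The resulting SDCLK is
				 * (host->max_clk * clk_mul) / div, and the
				 * divider field is programmed with div - 1.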
1886 */ 1887 clk = SDHCI_PROG_CLOCK_MODE; 1888 real_div = div; 1889 clk_mul = host->clk_mul; 1890 div--; 1891 } else { 1892 /* 1893 * Divisor can be too small to reach clock 1894 * speed requirement. Then use the base clock. 1895 */ 1896 switch_base_clk = true; 1897 } 1898 } 1899 1900 if (!host->clk_mul || switch_base_clk) { 1901 /* Version 3.00 divisors must be a multiple of 2. */ 1902 if (host->max_clk <= clock) 1903 div = 1; 1904 else { 1905 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; 1906 div += 2) { 1907 if ((host->max_clk / div) <= clock) 1908 break; 1909 } 1910 } 1911 real_div = div; 1912 div >>= 1; 1913 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN) 1914 && !div && host->max_clk <= 25000000) 1915 div = 1; 1916 } 1917 } else { 1918 /* Version 2.00 divisors must be a power of 2. */ 1919 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) { 1920 if ((host->max_clk / div) <= clock) 1921 break; 1922 } 1923 real_div = div; 1924 div >>= 1; 1925 } 1926 1927 clock_set: 1928 if (real_div) 1929 *actual_clock = (host->max_clk * clk_mul) / real_div; 1930 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; 1931 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) 1932 << SDHCI_DIVIDER_HI_SHIFT; 1933 1934 return clk; 1935 } 1936 EXPORT_SYMBOL_GPL(sdhci_calc_clk); 1937 1938 void sdhci_enable_clk(struct sdhci_host *host, u16 clk) 1939 { 1940 ktime_t timeout; 1941 1942 clk |= SDHCI_CLOCK_INT_EN; 1943 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1944 1945 /* Wait max 150 ms */ 1946 timeout = ktime_add_ms(ktime_get(), 150); 1947 while (1) { 1948 bool timedout = ktime_after(ktime_get(), timeout); 1949 1950 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1951 if (clk & SDHCI_CLOCK_INT_STABLE) 1952 break; 1953 if (timedout) { 1954 pr_err("%s: Internal clock never stabilised.\n", 1955 mmc_hostname(host->mmc)); 1956 sdhci_dumpregs(host); 1957 return; 1958 } 1959 udelay(10); 1960 } 1961 1962 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) { 1963 clk |= SDHCI_CLOCK_PLL_EN; 1964 clk &= ~SDHCI_CLOCK_INT_STABLE; 1965 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1966 1967 /* Wait max 150 ms */ 1968 timeout = ktime_add_ms(ktime_get(), 150); 1969 while (1) { 1970 bool timedout = ktime_after(ktime_get(), timeout); 1971 1972 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1973 if (clk & SDHCI_CLOCK_INT_STABLE) 1974 break; 1975 if (timedout) { 1976 pr_err("%s: PLL clock never stabilised.\n", 1977 mmc_hostname(host->mmc)); 1978 sdhci_dumpregs(host); 1979 return; 1980 } 1981 udelay(10); 1982 } 1983 } 1984 1985 clk |= SDHCI_CLOCK_CARD_EN; 1986 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1987 } 1988 EXPORT_SYMBOL_GPL(sdhci_enable_clk); 1989 1990 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 1991 { 1992 u16 clk; 1993 1994 host->mmc->actual_clock = 0; 1995 1996 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 1997 1998 if (clock == 0) 1999 return; 2000 2001 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); 2002 sdhci_enable_clk(host, clk); 2003 } 2004 EXPORT_SYMBOL_GPL(sdhci_set_clock); 2005 2006 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode, 2007 unsigned short vdd) 2008 { 2009 struct mmc_host *mmc = host->mmc; 2010 2011 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2012 2013 if (mode != MMC_POWER_OFF) 2014 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); 2015 else 2016 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2017 } 2018 2019 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, 2020 unsigned short vdd) 2021 
{ 2022 u8 pwr = 0; 2023 2024 if (mode != MMC_POWER_OFF) { 2025 switch (1 << vdd) { 2026 case MMC_VDD_165_195: 2027 /* 2028 * Without a regulator, SDHCI does not support 2.0v 2029 * so we only get here if the driver deliberately 2030 * added the 2.0v range to ocr_avail. Map it to 1.8v 2031 * for the purpose of turning on the power. 2032 */ 2033 case MMC_VDD_20_21: 2034 pwr = SDHCI_POWER_180; 2035 break; 2036 case MMC_VDD_29_30: 2037 case MMC_VDD_30_31: 2038 pwr = SDHCI_POWER_300; 2039 break; 2040 case MMC_VDD_32_33: 2041 case MMC_VDD_33_34: 2042 /* 2043 * 3.4 ~ 3.6V are valid only for those platforms where it's 2044 * known that the voltage range is supported by hardware. 2045 */ 2046 case MMC_VDD_34_35: 2047 case MMC_VDD_35_36: 2048 pwr = SDHCI_POWER_330; 2049 break; 2050 default: 2051 WARN(1, "%s: Invalid vdd %#x\n", 2052 mmc_hostname(host->mmc), vdd); 2053 break; 2054 } 2055 } 2056 2057 if (host->pwr == pwr) 2058 return; 2059 2060 host->pwr = pwr; 2061 2062 if (pwr == 0) { 2063 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2064 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2065 sdhci_runtime_pm_bus_off(host); 2066 } else { 2067 /* 2068 * Spec says that we should clear the power reg before setting 2069 * a new value. Some controllers don't seem to like this though. 2070 */ 2071 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 2072 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2073 2074 /* 2075 * At least the Marvell CaFe chip gets confused if we set the 2076 * voltage and set turn on power at the same time, so set the 2077 * voltage first. 2078 */ 2079 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) 2080 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2081 2082 pwr |= SDHCI_POWER_ON; 2083 2084 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2085 2086 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2087 sdhci_runtime_pm_bus_on(host); 2088 2089 /* 2090 * Some controllers need an extra 10ms delay of 10ms before 2091 * they can apply clock after applying power 2092 */ 2093 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 2094 mdelay(10); 2095 } 2096 } 2097 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg); 2098 2099 void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 2100 unsigned short vdd) 2101 { 2102 if (IS_ERR(host->mmc->supply.vmmc)) 2103 sdhci_set_power_noreg(host, mode, vdd); 2104 else 2105 sdhci_set_power_reg(host, mode, vdd); 2106 } 2107 EXPORT_SYMBOL_GPL(sdhci_set_power); 2108 2109 /* 2110 * Some controllers need to configure a valid bus voltage on their power 2111 * register regardless of whether an external regulator is taking care of power 2112 * supply. This helper function takes care of it if set as the controller's 2113 * sdhci_ops.set_power callback. 
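 * e.g. a vendor driver can simply set .set_power =
 * sdhci_set_power_and_bus_voltage in its struct sdhci_ops.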
2114 */ 2115 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host, 2116 unsigned char mode, 2117 unsigned short vdd) 2118 { 2119 if (!IS_ERR(host->mmc->supply.vmmc)) { 2120 struct mmc_host *mmc = host->mmc; 2121 2122 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2123 } 2124 sdhci_set_power_noreg(host, mode, vdd); 2125 } 2126 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage); 2127 2128 /*****************************************************************************\ 2129 * * 2130 * MMC callbacks * 2131 * * 2132 \*****************************************************************************/ 2133 2134 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 2135 { 2136 struct sdhci_host *host = mmc_priv(mmc); 2137 struct mmc_command *cmd; 2138 unsigned long flags; 2139 bool present; 2140 2141 /* First, check card presence */ 2142 present = mmc->ops->get_cd(mmc); 2143 2144 spin_lock_irqsave(&host->lock, flags); 2145 2146 sdhci_led_activate(host); 2147 2148 if (sdhci_present_error(host, mrq->cmd, present)) 2149 goto out_finish; 2150 2151 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2152 2153 if (!sdhci_send_command_retry(host, cmd, flags)) 2154 goto out_finish; 2155 2156 spin_unlock_irqrestore(&host->lock, flags); 2157 2158 return; 2159 2160 out_finish: 2161 sdhci_finish_mrq(host, mrq); 2162 spin_unlock_irqrestore(&host->lock, flags); 2163 } 2164 EXPORT_SYMBOL_GPL(sdhci_request); 2165 2166 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq) 2167 { 2168 struct sdhci_host *host = mmc_priv(mmc); 2169 struct mmc_command *cmd; 2170 unsigned long flags; 2171 int ret = 0; 2172 2173 spin_lock_irqsave(&host->lock, flags); 2174 2175 if (sdhci_present_error(host, mrq->cmd, true)) { 2176 sdhci_finish_mrq(host, mrq); 2177 goto out_finish; 2178 } 2179 2180 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2181 2182 /* 2183 * The HSQ may send a command in interrupt context without polling 2184 * the busy signaling, which means we should return BUSY if the controller 2185 * has not yet released the inhibit bits, so that HSQ can retry the request 2186 * in non-atomic context. Therefore we must not finish this request 2187 * here.
2188 */ 2189 if (!sdhci_send_command(host, cmd)) 2190 ret = -EBUSY; 2191 else 2192 sdhci_led_activate(host); 2193 2194 out_finish: 2195 spin_unlock_irqrestore(&host->lock, flags); 2196 return ret; 2197 } 2198 EXPORT_SYMBOL_GPL(sdhci_request_atomic); 2199 2200 void sdhci_set_bus_width(struct sdhci_host *host, int width) 2201 { 2202 u8 ctrl; 2203 2204 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2205 if (width == MMC_BUS_WIDTH_8) { 2206 ctrl &= ~SDHCI_CTRL_4BITBUS; 2207 ctrl |= SDHCI_CTRL_8BITBUS; 2208 } else { 2209 if (host->mmc->caps & MMC_CAP_8_BIT_DATA) 2210 ctrl &= ~SDHCI_CTRL_8BITBUS; 2211 if (width == MMC_BUS_WIDTH_4) 2212 ctrl |= SDHCI_CTRL_4BITBUS; 2213 else 2214 ctrl &= ~SDHCI_CTRL_4BITBUS; 2215 } 2216 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2217 } 2218 EXPORT_SYMBOL_GPL(sdhci_set_bus_width); 2219 2220 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 2221 { 2222 u16 ctrl_2; 2223 2224 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2225 /* Select Bus Speed Mode for host */ 2226 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 2227 if ((timing == MMC_TIMING_MMC_HS200) || 2228 (timing == MMC_TIMING_UHS_SDR104)) 2229 ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 2230 else if (timing == MMC_TIMING_UHS_SDR12) 2231 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 2232 else if (timing == MMC_TIMING_UHS_SDR25) 2233 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 2234 else if (timing == MMC_TIMING_UHS_SDR50) 2235 ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 2236 else if ((timing == MMC_TIMING_UHS_DDR50) || 2237 (timing == MMC_TIMING_MMC_DDR52)) 2238 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 2239 else if (timing == MMC_TIMING_MMC_HS400) 2240 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ 2241 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2242 } 2243 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 2244 2245 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 2246 { 2247 struct sdhci_host *host = mmc_priv(mmc); 2248 u8 ctrl; 2249 2250 if (ios->power_mode == MMC_POWER_UNDEFINED) 2251 return; 2252 2253 if (host->flags & SDHCI_DEVICE_DEAD) { 2254 if (!IS_ERR(mmc->supply.vmmc) && 2255 ios->power_mode == MMC_POWER_OFF) 2256 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 2257 return; 2258 } 2259 2260 /* 2261 * Reset the chip on each power off. 2262 * Should clear out any weird states. 2263 */ 2264 if (ios->power_mode == MMC_POWER_OFF) { 2265 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 2266 sdhci_reinit(host); 2267 } 2268 2269 if (host->version >= SDHCI_SPEC_300 && 2270 (ios->power_mode == MMC_POWER_UP) && 2271 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) 2272 sdhci_enable_preset_value(host, false); 2273 2274 if (!ios->clock || ios->clock != host->clock) { 2275 host->ops->set_clock(host, ios->clock); 2276 host->clock = ios->clock; 2277 2278 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && 2279 host->clock) { 2280 host->timeout_clk = mmc->actual_clock ? 2281 mmc->actual_clock / 1000 : 2282 host->clock / 1000; 2283 mmc->max_busy_timeout = 2284 host->ops->get_max_timeout_count ? 
2285 host->ops->get_max_timeout_count(host) : 2286 1 << 27; 2287 mmc->max_busy_timeout /= host->timeout_clk; 2288 } 2289 } 2290 2291 if (host->ops->set_power) 2292 host->ops->set_power(host, ios->power_mode, ios->vdd); 2293 else 2294 sdhci_set_power(host, ios->power_mode, ios->vdd); 2295 2296 if (host->ops->platform_send_init_74_clocks) 2297 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 2298 2299 host->ops->set_bus_width(host, ios->bus_width); 2300 2301 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2302 2303 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) { 2304 if (ios->timing == MMC_TIMING_SD_HS || 2305 ios->timing == MMC_TIMING_MMC_HS || 2306 ios->timing == MMC_TIMING_MMC_HS400 || 2307 ios->timing == MMC_TIMING_MMC_HS200 || 2308 ios->timing == MMC_TIMING_MMC_DDR52 || 2309 ios->timing == MMC_TIMING_UHS_SDR50 || 2310 ios->timing == MMC_TIMING_UHS_SDR104 || 2311 ios->timing == MMC_TIMING_UHS_DDR50 || 2312 ios->timing == MMC_TIMING_UHS_SDR25) 2313 ctrl |= SDHCI_CTRL_HISPD; 2314 else 2315 ctrl &= ~SDHCI_CTRL_HISPD; 2316 } 2317 2318 if (host->version >= SDHCI_SPEC_300) { 2319 u16 clk, ctrl_2; 2320 2321 if (!host->preset_enabled) { 2322 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2323 /* 2324 * We only need to set Driver Strength if the 2325 * preset value enable is not set. 2326 */ 2327 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2328 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; 2329 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) 2330 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; 2331 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B) 2332 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2333 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) 2334 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; 2335 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D) 2336 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D; 2337 else { 2338 pr_warn("%s: invalid driver type, default to driver type B\n", 2339 mmc_hostname(mmc)); 2340 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2341 } 2342 2343 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2344 } else { 2345 /* 2346 * According to SDHC Spec v3.00, if the Preset Value 2347 * Enable in the Host Control 2 register is set, we 2348 * need to reset SD Clock Enable before changing High 2349 * Speed Enable to avoid generating clock glitches.
2350 */ 2351 2352 /* Reset SD Clock Enable */ 2353 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2354 clk &= ~SDHCI_CLOCK_CARD_EN; 2355 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2356 2357 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2358 2359 /* Re-enable SD Clock */ 2360 host->ops->set_clock(host, host->clock); 2361 } 2362 2363 /* Reset SD Clock Enable */ 2364 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2365 clk &= ~SDHCI_CLOCK_CARD_EN; 2366 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2367 2368 host->ops->set_uhs_signaling(host, ios->timing); 2369 host->timing = ios->timing; 2370 2371 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && 2372 ((ios->timing == MMC_TIMING_UHS_SDR12) || 2373 (ios->timing == MMC_TIMING_UHS_SDR25) || 2374 (ios->timing == MMC_TIMING_UHS_SDR50) || 2375 (ios->timing == MMC_TIMING_UHS_SDR104) || 2376 (ios->timing == MMC_TIMING_UHS_DDR50) || 2377 (ios->timing == MMC_TIMING_MMC_DDR52))) { 2378 u16 preset; 2379 2380 sdhci_enable_preset_value(host, true); 2381 preset = sdhci_get_preset_value(host); 2382 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK, 2383 preset); 2384 } 2385 2386 /* Re-enable SD Clock */ 2387 host->ops->set_clock(host, host->clock); 2388 } else 2389 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2390 2391 /* 2392 * Some (ENE) controllers go apeshit on some ios operation, 2393 * signalling timeout and CRC errors even on CMD0. Resetting 2394 * it on each ios seems to solve the problem. 2395 */ 2396 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) 2397 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 2398 } 2399 EXPORT_SYMBOL_GPL(sdhci_set_ios); 2400 2401 static int sdhci_get_cd(struct mmc_host *mmc) 2402 { 2403 struct sdhci_host *host = mmc_priv(mmc); 2404 int gpio_cd = mmc_gpio_get_cd(mmc); 2405 2406 if (host->flags & SDHCI_DEVICE_DEAD) 2407 return 0; 2408 2409 /* If nonremovable, assume that the card is always present. */ 2410 if (!mmc_card_is_removable(mmc)) 2411 return 1; 2412 2413 /* 2414 * Try slot gpio detect; if defined, it takes precedence 2415 * over built-in controller functionality. 2416 */ 2417 if (gpio_cd >= 0) 2418 return !!gpio_cd; 2419 2420 /* If polling, assume that the card is always present.
*/ 2421 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2422 return 1; 2423 2424 /* Host native card detect */ 2425 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2426 } 2427 2428 int sdhci_get_cd_nogpio(struct mmc_host *mmc) 2429 { 2430 struct sdhci_host *host = mmc_priv(mmc); 2431 unsigned long flags; 2432 int ret = 0; 2433 2434 spin_lock_irqsave(&host->lock, flags); 2435 2436 if (host->flags & SDHCI_DEVICE_DEAD) 2437 goto out; 2438 2439 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2440 out: 2441 spin_unlock_irqrestore(&host->lock, flags); 2442 2443 return ret; 2444 } 2445 EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio); 2446 2447 static int sdhci_check_ro(struct sdhci_host *host) 2448 { 2449 unsigned long flags; 2450 int is_readonly; 2451 2452 spin_lock_irqsave(&host->lock, flags); 2453 2454 if (host->flags & SDHCI_DEVICE_DEAD) 2455 is_readonly = 0; 2456 else if (host->ops->get_ro) 2457 is_readonly = host->ops->get_ro(host); 2458 else if (mmc_can_gpio_ro(host->mmc)) 2459 is_readonly = mmc_gpio_get_ro(host->mmc); 2460 else 2461 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) 2462 & SDHCI_WRITE_PROTECT); 2463 2464 spin_unlock_irqrestore(&host->lock, flags); 2465 2466 /* This quirk needs to be replaced by a callback-function later */ 2467 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ? 2468 !is_readonly : is_readonly; 2469 } 2470 2471 #define SAMPLE_COUNT 5 2472 2473 static int sdhci_get_ro(struct mmc_host *mmc) 2474 { 2475 struct sdhci_host *host = mmc_priv(mmc); 2476 int i, ro_count; 2477 2478 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) 2479 return sdhci_check_ro(host); 2480 2481 ro_count = 0; 2482 for (i = 0; i < SAMPLE_COUNT; i++) { 2483 if (sdhci_check_ro(host)) { 2484 if (++ro_count > SAMPLE_COUNT / 2) 2485 return 1; 2486 } 2487 msleep(30); 2488 } 2489 return 0; 2490 } 2491 2492 static void sdhci_hw_reset(struct mmc_host *mmc) 2493 { 2494 struct sdhci_host *host = mmc_priv(mmc); 2495 2496 if (host->ops && host->ops->hw_reset) 2497 host->ops->hw_reset(host); 2498 } 2499 2500 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 2501 { 2502 if (!(host->flags & SDHCI_DEVICE_DEAD)) { 2503 if (enable) 2504 host->ier |= SDHCI_INT_CARD_INT; 2505 else 2506 host->ier &= ~SDHCI_INT_CARD_INT; 2507 2508 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2509 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2510 } 2511 } 2512 2513 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 2514 { 2515 struct sdhci_host *host = mmc_priv(mmc); 2516 unsigned long flags; 2517 2518 if (enable) 2519 pm_runtime_get_noresume(mmc_dev(mmc)); 2520 2521 spin_lock_irqsave(&host->lock, flags); 2522 sdhci_enable_sdio_irq_nolock(host, enable); 2523 spin_unlock_irqrestore(&host->lock, flags); 2524 2525 if (!enable) 2526 pm_runtime_put_noidle(mmc_dev(mmc)); 2527 } 2528 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2529 2530 static void sdhci_ack_sdio_irq(struct mmc_host *mmc) 2531 { 2532 struct sdhci_host *host = mmc_priv(mmc); 2533 unsigned long flags; 2534 2535 spin_lock_irqsave(&host->lock, flags); 2536 sdhci_enable_sdio_irq_nolock(host, true); 2537 spin_unlock_irqrestore(&host->lock, flags); 2538 } 2539 2540 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 2541 struct mmc_ios *ios) 2542 { 2543 struct sdhci_host *host = mmc_priv(mmc); 2544 u16 ctrl; 2545 int ret; 2546 2547 /* 2548 * Signal Voltage Switching is only applicable for Host Controllers 2549 * v3.00 and above. 
2550 */ 2551 if (host->version < SDHCI_SPEC_300) 2552 return 0; 2553 2554 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2555 2556 switch (ios->signal_voltage) { 2557 case MMC_SIGNAL_VOLTAGE_330: 2558 if (!(host->flags & SDHCI_SIGNALING_330)) 2559 return -EINVAL; 2560 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 2561 ctrl &= ~SDHCI_CTRL_VDD_180; 2562 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2563 2564 if (!IS_ERR(mmc->supply.vqmmc)) { 2565 ret = mmc_regulator_set_vqmmc(mmc, ios); 2566 if (ret < 0) { 2567 pr_warn("%s: Switching to 3.3V signalling voltage failed\n", 2568 mmc_hostname(mmc)); 2569 return -EIO; 2570 } 2571 } 2572 /* Wait for 5ms */ 2573 usleep_range(5000, 5500); 2574 2575 /* 3.3V regulator output should be stable within 5 ms */ 2576 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2577 if (!(ctrl & SDHCI_CTRL_VDD_180)) 2578 return 0; 2579 2580 pr_warn("%s: 3.3V regulator output did not become stable\n", 2581 mmc_hostname(mmc)); 2582 2583 return -EAGAIN; 2584 case MMC_SIGNAL_VOLTAGE_180: 2585 if (!(host->flags & SDHCI_SIGNALING_180)) 2586 return -EINVAL; 2587 if (!IS_ERR(mmc->supply.vqmmc)) { 2588 ret = mmc_regulator_set_vqmmc(mmc, ios); 2589 if (ret < 0) { 2590 pr_warn("%s: Switching to 1.8V signalling voltage failed\n", 2591 mmc_hostname(mmc)); 2592 return -EIO; 2593 } 2594 } 2595 2596 /* 2597 * Enable 1.8V Signal Enable in the Host Control2 2598 * register 2599 */ 2600 ctrl |= SDHCI_CTRL_VDD_180; 2601 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2602 2603 /* Some controllers need to do more when switching */ 2604 if (host->ops->voltage_switch) 2605 host->ops->voltage_switch(host); 2606 2607 /* 1.8V regulator output should be stable within 5 ms */ 2608 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2609 if (ctrl & SDHCI_CTRL_VDD_180) 2610 return 0; 2611 2612 pr_warn("%s: 1.8V regulator output did not become stable\n", 2613 mmc_hostname(mmc)); 2614 2615 return -EAGAIN; 2616 case MMC_SIGNAL_VOLTAGE_120: 2617 if (!(host->flags & SDHCI_SIGNALING_120)) 2618 return -EINVAL; 2619 if (!IS_ERR(mmc->supply.vqmmc)) { 2620 ret = mmc_regulator_set_vqmmc(mmc, ios); 2621 if (ret < 0) { 2622 pr_warn("%s: Switching to 1.2V signalling voltage failed\n", 2623 mmc_hostname(mmc)); 2624 return -EIO; 2625 } 2626 } 2627 return 0; 2628 default: 2629 /* No signal voltage switch required */ 2630 return 0; 2631 } 2632 } 2633 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch); 2634 2635 static int sdhci_card_busy(struct mmc_host *mmc) 2636 { 2637 struct sdhci_host *host = mmc_priv(mmc); 2638 u32 present_state; 2639 2640 /* Check whether DAT[0] is 0 */ 2641 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 2642 2643 return !(present_state & SDHCI_DATA_0_LVL_MASK); 2644 } 2645 2646 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 2647 { 2648 struct sdhci_host *host = mmc_priv(mmc); 2649 unsigned long flags; 2650 2651 spin_lock_irqsave(&host->lock, flags); 2652 host->flags |= SDHCI_HS400_TUNING; 2653 spin_unlock_irqrestore(&host->lock, flags); 2654 2655 return 0; 2656 } 2657 2658 void sdhci_start_tuning(struct sdhci_host *host) 2659 { 2660 u16 ctrl; 2661 2662 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2663 ctrl |= SDHCI_CTRL_EXEC_TUNING; 2664 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND) 2665 ctrl |= SDHCI_CTRL_TUNED_CLK; 2666 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2667 2668 /* 2669 * As per the Host Controller spec v3.00, tuning command 2670 * generates Buffer Read Ready interrupt, so enable that.
2671 * 2672 * Note: The spec clearly says that when tuning sequence 2673 * is being performed, the controller does not generate 2674 * interrupts other than Buffer Read Ready interrupt. But 2675 * to make sure we don't hit a controller bug, we _only_ 2676 * enable Buffer Read Ready interrupt here. 2677 */ 2678 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 2679 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 2680 } 2681 EXPORT_SYMBOL_GPL(sdhci_start_tuning); 2682 2683 void sdhci_end_tuning(struct sdhci_host *host) 2684 { 2685 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2686 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2687 } 2688 EXPORT_SYMBOL_GPL(sdhci_end_tuning); 2689 2690 void sdhci_reset_tuning(struct sdhci_host *host) 2691 { 2692 u16 ctrl; 2693 2694 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2695 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2696 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2697 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2698 } 2699 EXPORT_SYMBOL_GPL(sdhci_reset_tuning); 2700 2701 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) 2702 { 2703 sdhci_reset_tuning(host); 2704 2705 sdhci_do_reset(host, SDHCI_RESET_CMD); 2706 sdhci_do_reset(host, SDHCI_RESET_DATA); 2707 2708 sdhci_end_tuning(host); 2709 2710 mmc_send_abort_tuning(host->mmc, opcode); 2711 } 2712 EXPORT_SYMBOL_GPL(sdhci_abort_tuning); 2713 2714 /* 2715 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI 2716 * tuning command does not have a data payload (or rather the hardware does it 2717 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command 2718 * interrupt setup is different to other commands and there is no timeout 2719 * interrupt so special handling is needed. 2720 */ 2721 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) 2722 { 2723 struct mmc_host *mmc = host->mmc; 2724 struct mmc_command cmd = {}; 2725 struct mmc_request mrq = {}; 2726 unsigned long flags; 2727 u32 b = host->sdma_boundary; 2728 2729 spin_lock_irqsave(&host->lock, flags); 2730 2731 cmd.opcode = opcode; 2732 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2733 cmd.mrq = &mrq; 2734 2735 mrq.cmd = &cmd; 2736 /* 2737 * In response to CMD19, the card sends 64 bytes of tuning 2738 * block to the Host Controller. So we set the block size 2739 * to 64 here. 2740 */ 2741 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && 2742 mmc->ios.bus_width == MMC_BUS_WIDTH_8) 2743 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); 2744 else 2745 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); 2746 2747 /* 2748 * The tuning block is sent by the card to the host controller. 2749 * So we set the TRNS_READ bit in the Transfer Mode register. 2750 * This also takes care of setting DMA Enable and Multi Block 2751 * Select in the same register to 0. 
2752 */ 2753 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 2754 2755 if (!sdhci_send_command_retry(host, &cmd, flags)) { 2756 spin_unlock_irqrestore(&host->lock, flags); 2757 host->tuning_done = 0; 2758 return; 2759 } 2760 2761 host->cmd = NULL; 2762 2763 sdhci_del_timer(host, &mrq); 2764 2765 host->tuning_done = 0; 2766 2767 spin_unlock_irqrestore(&host->lock, flags); 2768 2769 /* Wait for Buffer Read Ready interrupt */ 2770 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2771 msecs_to_jiffies(50)); 2772 2773 } 2774 EXPORT_SYMBOL_GPL(sdhci_send_tuning); 2775 2776 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2777 { 2778 int i; 2779 2780 /* 2781 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number 2782 * of loops reaches tuning loop count. 2783 */ 2784 for (i = 0; i < host->tuning_loop_count; i++) { 2785 u16 ctrl; 2786 2787 sdhci_send_tuning(host, opcode); 2788 2789 if (!host->tuning_done) { 2790 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", 2791 mmc_hostname(host->mmc)); 2792 sdhci_abort_tuning(host, opcode); 2793 return -ETIMEDOUT; 2794 } 2795 2796 /* Spec does not require a delay between tuning cycles */ 2797 if (host->tuning_delay > 0) 2798 mdelay(host->tuning_delay); 2799 2800 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2801 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { 2802 if (ctrl & SDHCI_CTRL_TUNED_CLK) 2803 return 0; /* Success! */ 2804 break; 2805 } 2806 2807 } 2808 2809 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2810 mmc_hostname(host->mmc)); 2811 sdhci_reset_tuning(host); 2812 return -EAGAIN; 2813 } 2814 2815 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2816 { 2817 struct sdhci_host *host = mmc_priv(mmc); 2818 int err = 0; 2819 unsigned int tuning_count = 0; 2820 bool hs400_tuning; 2821 2822 hs400_tuning = host->flags & SDHCI_HS400_TUNING; 2823 2824 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 2825 tuning_count = host->tuning_count; 2826 2827 /* 2828 * The Host Controller needs tuning in case of SDR104 and DDR50 2829 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in 2830 * the Capabilities register. 2831 * If the Host Controller supports the HS200 mode then the 2832 * tuning function has to be executed. 2833 */ 2834 switch (host->timing) { 2835 /* HS400 tuning is done in HS200 mode */ 2836 case MMC_TIMING_MMC_HS400: 2837 err = -EINVAL; 2838 goto out; 2839 2840 case MMC_TIMING_MMC_HS200: 2841 /* 2842 * Periodic re-tuning for HS400 is not expected to be needed, so 2843 * disable it here. 
2844 */ 2845 if (hs400_tuning) 2846 tuning_count = 0; 2847 break; 2848 2849 case MMC_TIMING_UHS_SDR104: 2850 case MMC_TIMING_UHS_DDR50: 2851 break; 2852 2853 case MMC_TIMING_UHS_SDR50: 2854 if (host->flags & SDHCI_SDR50_NEEDS_TUNING) 2855 break; 2856 fallthrough; 2857 2858 default: 2859 goto out; 2860 } 2861 2862 if (host->ops->platform_execute_tuning) { 2863 err = host->ops->platform_execute_tuning(host, opcode); 2864 goto out; 2865 } 2866 2867 mmc->retune_period = tuning_count; 2868 2869 if (host->tuning_delay < 0) 2870 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; 2871 2872 sdhci_start_tuning(host); 2873 2874 host->tuning_err = __sdhci_execute_tuning(host, opcode); 2875 2876 sdhci_end_tuning(host); 2877 out: 2878 host->flags &= ~SDHCI_HS400_TUNING; 2879 2880 return err; 2881 } 2882 EXPORT_SYMBOL_GPL(sdhci_execute_tuning); 2883 2884 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2885 { 2886 /* Host Controller v3.00 defines preset value registers */ 2887 if (host->version < SDHCI_SPEC_300) 2888 return; 2889 2890 /* 2891 * We only enable or disable Preset Value if they are not already 2892 * enabled or disabled respectively. Otherwise, we bail out. 2893 */ 2894 if (host->preset_enabled != enable) { 2895 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2896 2897 if (enable) 2898 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2899 else 2900 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2901 2902 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2903 2904 if (enable) 2905 host->flags |= SDHCI_PV_ENABLED; 2906 else 2907 host->flags &= ~SDHCI_PV_ENABLED; 2908 2909 host->preset_enabled = enable; 2910 } 2911 } 2912 2913 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 2914 int err) 2915 { 2916 struct mmc_data *data = mrq->data; 2917 2918 if (data->host_cookie != COOKIE_UNMAPPED) 2919 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, 2920 mmc_get_dma_dir(data)); 2921 2922 data->host_cookie = COOKIE_UNMAPPED; 2923 } 2924 2925 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 2926 { 2927 struct sdhci_host *host = mmc_priv(mmc); 2928 2929 mrq->data->host_cookie = COOKIE_UNMAPPED; 2930 2931 /* 2932 * No pre-mapping in the pre hook if we're using the bounce buffer, 2933 * for that we would need two bounce buffers since one buffer is 2934 * in flight when this is getting called. 
2935 */ 2936 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) 2937 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 2938 } 2939 2940 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) 2941 { 2942 if (host->data_cmd) { 2943 host->data_cmd->error = err; 2944 sdhci_finish_mrq(host, host->data_cmd->mrq); 2945 } 2946 2947 if (host->cmd) { 2948 host->cmd->error = err; 2949 sdhci_finish_mrq(host, host->cmd->mrq); 2950 } 2951 } 2952 2953 static void sdhci_card_event(struct mmc_host *mmc) 2954 { 2955 struct sdhci_host *host = mmc_priv(mmc); 2956 unsigned long flags; 2957 int present; 2958 2959 /* First check if client has provided their own card event */ 2960 if (host->ops->card_event) 2961 host->ops->card_event(host); 2962 2963 present = mmc->ops->get_cd(mmc); 2964 2965 spin_lock_irqsave(&host->lock, flags); 2966 2967 /* Check sdhci_has_requests() first in case we are runtime suspended */ 2968 if (sdhci_has_requests(host) && !present) { 2969 pr_err("%s: Card removed during transfer!\n", 2970 mmc_hostname(mmc)); 2971 pr_err("%s: Resetting controller.\n", 2972 mmc_hostname(mmc)); 2973 2974 sdhci_do_reset(host, SDHCI_RESET_CMD); 2975 sdhci_do_reset(host, SDHCI_RESET_DATA); 2976 2977 sdhci_error_out_mrqs(host, -ENOMEDIUM); 2978 } 2979 2980 spin_unlock_irqrestore(&host->lock, flags); 2981 } 2982 2983 static const struct mmc_host_ops sdhci_ops = { 2984 .request = sdhci_request, 2985 .post_req = sdhci_post_req, 2986 .pre_req = sdhci_pre_req, 2987 .set_ios = sdhci_set_ios, 2988 .get_cd = sdhci_get_cd, 2989 .get_ro = sdhci_get_ro, 2990 .hw_reset = sdhci_hw_reset, 2991 .enable_sdio_irq = sdhci_enable_sdio_irq, 2992 .ack_sdio_irq = sdhci_ack_sdio_irq, 2993 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 2994 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 2995 .execute_tuning = sdhci_execute_tuning, 2996 .card_event = sdhci_card_event, 2997 .card_busy = sdhci_card_busy, 2998 }; 2999 3000 /*****************************************************************************\ 3001 * * 3002 * Request done * 3003 * * 3004 \*****************************************************************************/ 3005 3006 static bool sdhci_request_done(struct sdhci_host *host) 3007 { 3008 unsigned long flags; 3009 struct mmc_request *mrq; 3010 int i; 3011 3012 spin_lock_irqsave(&host->lock, flags); 3013 3014 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3015 mrq = host->mrqs_done[i]; 3016 if (mrq) 3017 break; 3018 } 3019 3020 if (!mrq) { 3021 spin_unlock_irqrestore(&host->lock, flags); 3022 return true; 3023 } 3024 3025 /* 3026 * The controller needs a reset of internal state machines 3027 * upon error conditions. 3028 */ 3029 if (sdhci_needs_reset(host, mrq)) { 3030 /* 3031 * Do not finish until command and data lines are available for 3032 * reset. Note there can only be one other mrq, so it cannot 3033 * also be in mrqs_done, otherwise host->cmd and host->data_cmd 3034 * would both be null. 3035 */ 3036 if (host->cmd || host->data_cmd) { 3037 spin_unlock_irqrestore(&host->lock, flags); 3038 return true; 3039 } 3040 3041 /* Some controllers need this kick or reset won't work here */ 3042 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 3043 /* This is to force an update */ 3044 host->ops->set_clock(host, host->clock); 3045 3046 /* 3047 * Spec says we should do both at the same time, but Ricoh 3048 * controllers do not like that. 
3049 */ 3050 sdhci_do_reset(host, SDHCI_RESET_CMD); 3051 sdhci_do_reset(host, SDHCI_RESET_DATA); 3052 3053 host->pending_reset = false; 3054 } 3055 3056 /* 3057 * Always unmap the data buffers if they were mapped by 3058 * sdhci_prepare_data() whenever we finish with a request. 3059 * This avoids leaking DMA mappings on error. 3060 */ 3061 if (host->flags & SDHCI_REQ_USE_DMA) { 3062 struct mmc_data *data = mrq->data; 3063 3064 if (host->use_external_dma && data && 3065 (mrq->cmd->error || data->error)) { 3066 struct dma_chan *chan = sdhci_external_dma_channel(host, data); 3067 3068 host->mrqs_done[i] = NULL; 3069 spin_unlock_irqrestore(&host->lock, flags); 3070 dmaengine_terminate_sync(chan); 3071 spin_lock_irqsave(&host->lock, flags); 3072 sdhci_set_mrq_done(host, mrq); 3073 } 3074 3075 if (data && data->host_cookie == COOKIE_MAPPED) { 3076 if (host->bounce_buffer) { 3077 /* 3078 * On reads, copy the bounced data into the 3079 * sglist 3080 */ 3081 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 3082 unsigned int length = data->bytes_xfered; 3083 3084 if (length > host->bounce_buffer_size) { 3085 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 3086 mmc_hostname(host->mmc), 3087 host->bounce_buffer_size, 3088 data->bytes_xfered); 3089 /* Cap it down and continue */ 3090 length = host->bounce_buffer_size; 3091 } 3092 dma_sync_single_for_cpu( 3093 mmc_dev(host->mmc), 3094 host->bounce_addr, 3095 host->bounce_buffer_size, 3096 DMA_FROM_DEVICE); 3097 sg_copy_from_buffer(data->sg, 3098 data->sg_len, 3099 host->bounce_buffer, 3100 length); 3101 } else { 3102 /* No copying, just switch ownership */ 3103 dma_sync_single_for_cpu( 3104 mmc_dev(host->mmc), 3105 host->bounce_addr, 3106 host->bounce_buffer_size, 3107 mmc_get_dma_dir(data)); 3108 } 3109 } else { 3110 /* Unmap the raw data */ 3111 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 3112 data->sg_len, 3113 mmc_get_dma_dir(data)); 3114 } 3115 data->host_cookie = COOKIE_UNMAPPED; 3116 } 3117 } 3118 3119 host->mrqs_done[i] = NULL; 3120 3121 spin_unlock_irqrestore(&host->lock, flags); 3122 3123 if (host->ops->request_done) 3124 host->ops->request_done(host, mrq); 3125 else 3126 mmc_request_done(host->mmc, mrq); 3127 3128 return false; 3129 } 3130 3131 static void sdhci_complete_work(struct work_struct *work) 3132 { 3133 struct sdhci_host *host = container_of(work, struct sdhci_host, 3134 complete_work); 3135 3136 while (!sdhci_request_done(host)) 3137 ; 3138 } 3139 3140 static void sdhci_timeout_timer(struct timer_list *t) 3141 { 3142 struct sdhci_host *host; 3143 unsigned long flags; 3144 3145 host = from_timer(host, t, timer); 3146 3147 spin_lock_irqsave(&host->lock, flags); 3148 3149 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { 3150 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", 3151 mmc_hostname(host->mmc)); 3152 sdhci_dumpregs(host); 3153 3154 host->cmd->error = -ETIMEDOUT; 3155 sdhci_finish_mrq(host, host->cmd->mrq); 3156 } 3157 3158 spin_unlock_irqrestore(&host->lock, flags); 3159 } 3160 3161 static void sdhci_timeout_data_timer(struct timer_list *t) 3162 { 3163 struct sdhci_host *host; 3164 unsigned long flags; 3165 3166 host = from_timer(host, t, data_timer); 3167 3168 spin_lock_irqsave(&host->lock, flags); 3169 3170 if (host->data || host->data_cmd || 3171 (host->cmd && sdhci_data_line_cmd(host->cmd))) { 3172 pr_err("%s: Timeout waiting for hardware interrupt.\n", 3173 mmc_hostname(host->mmc)); 3174 sdhci_dumpregs(host); 3175 3176 if (host->data) { 3177 host->data->error = -ETIMEDOUT; 
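/* Tear down the timed-out data transfer now; the completion work queued below finishes the request outside the timer context. */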
3178 __sdhci_finish_data(host, true); 3179 queue_work(host->complete_wq, &host->complete_work); 3180 } else if (host->data_cmd) { 3181 host->data_cmd->error = -ETIMEDOUT; 3182 sdhci_finish_mrq(host, host->data_cmd->mrq); 3183 } else { 3184 host->cmd->error = -ETIMEDOUT; 3185 sdhci_finish_mrq(host, host->cmd->mrq); 3186 } 3187 } 3188 3189 spin_unlock_irqrestore(&host->lock, flags); 3190 } 3191 3192 /*****************************************************************************\ 3193 * * 3194 * Interrupt handling * 3195 * * 3196 \*****************************************************************************/ 3197 3198 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) 3199 { 3200 /* Handle auto-CMD12 error */ 3201 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { 3202 struct mmc_request *mrq = host->data_cmd->mrq; 3203 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3204 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 3205 SDHCI_INT_DATA_TIMEOUT : 3206 SDHCI_INT_DATA_CRC; 3207 3208 /* Treat auto-CMD12 error the same as data error */ 3209 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { 3210 *intmask_p |= data_err_bit; 3211 return; 3212 } 3213 } 3214 3215 if (!host->cmd) { 3216 /* 3217 * SDHCI recovers from errors by resetting the cmd and data 3218 * circuits. Until that is done, there very well might be more 3219 * interrupts, so ignore them in that case. 3220 */ 3221 if (host->pending_reset) 3222 return; 3223 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 3224 mmc_hostname(host->mmc), (unsigned)intmask); 3225 sdhci_dumpregs(host); 3226 return; 3227 } 3228 3229 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 3230 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 3231 if (intmask & SDHCI_INT_TIMEOUT) 3232 host->cmd->error = -ETIMEDOUT; 3233 else 3234 host->cmd->error = -EILSEQ; 3235 3236 /* Treat data command CRC error the same as data CRC error */ 3237 if (host->cmd->data && 3238 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 3239 SDHCI_INT_CRC) { 3240 host->cmd = NULL; 3241 *intmask_p |= SDHCI_INT_DATA_CRC; 3242 return; 3243 } 3244 3245 __sdhci_finish_mrq(host, host->cmd->mrq); 3246 return; 3247 } 3248 3249 /* Handle auto-CMD23 error */ 3250 if (intmask & SDHCI_INT_AUTO_CMD_ERR) { 3251 struct mmc_request *mrq = host->cmd->mrq; 3252 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3253 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 
3254 -ETIMEDOUT : 3255 -EILSEQ; 3256 3257 if (sdhci_auto_cmd23(host, mrq)) { 3258 mrq->sbc->error = err; 3259 __sdhci_finish_mrq(host, mrq); 3260 return; 3261 } 3262 } 3263 3264 if (intmask & SDHCI_INT_RESPONSE) 3265 sdhci_finish_command(host); 3266 } 3267 3268 static void sdhci_adma_show_error(struct sdhci_host *host) 3269 { 3270 void *desc = host->adma_table; 3271 dma_addr_t dma = host->adma_addr; 3272 3273 sdhci_dumpregs(host); 3274 3275 while (true) { 3276 struct sdhci_adma2_64_desc *dma_desc = desc; 3277 3278 if (host->flags & SDHCI_USE_64_BIT_DMA) 3279 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 3280 (unsigned long long)dma, 3281 le32_to_cpu(dma_desc->addr_hi), 3282 le32_to_cpu(dma_desc->addr_lo), 3283 le16_to_cpu(dma_desc->len), 3284 le16_to_cpu(dma_desc->cmd)); 3285 else 3286 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 3287 (unsigned long long)dma, 3288 le32_to_cpu(dma_desc->addr_lo), 3289 le16_to_cpu(dma_desc->len), 3290 le16_to_cpu(dma_desc->cmd)); 3291 3292 desc += host->desc_sz; 3293 dma += host->desc_sz; 3294 3295 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 3296 break; 3297 } 3298 } 3299 3300 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 3301 { 3302 u32 command; 3303 3304 /* 3305 * CMD19 generates _only_ a Buffer Read Ready interrupt when 3306 * sdhci_send_tuning() is used. 3307 * We must exclude the case of PIO mode with mmc_send_tuning(): 3308 * otherwise sdhci_transfer_pio() is never called, SDHCI_INT_DATA_AVAIL 3309 * stays asserted, and we end up stuck in an interrupt storm. 3310 */ 3311 if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) { 3312 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); 3313 if (command == MMC_SEND_TUNING_BLOCK || 3314 command == MMC_SEND_TUNING_BLOCK_HS200) { 3315 host->tuning_done = 1; 3316 wake_up(&host->buf_ready_int); 3317 return; 3318 } 3319 } 3320 3321 if (!host->data) { 3322 struct mmc_command *data_cmd = host->data_cmd; 3323 3324 /* 3325 * The "data complete" interrupt is also used to 3326 * indicate that a busy state has ended. See comment 3327 * above in sdhci_cmd_irq(). 3328 */ 3329 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 3330 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3331 host->data_cmd = NULL; 3332 data_cmd->error = -ETIMEDOUT; 3333 __sdhci_finish_mrq(host, data_cmd->mrq); 3334 return; 3335 } 3336 if (intmask & SDHCI_INT_DATA_END) { 3337 host->data_cmd = NULL; 3338 /* 3339 * Some cards handle the busy-end interrupt 3340 * before the command has completed, so make 3341 * sure we do things in the proper order. 3342 */ 3343 if (host->cmd == data_cmd) 3344 return; 3345 3346 __sdhci_finish_mrq(host, data_cmd->mrq); 3347 return; 3348 } 3349 } 3350 3351 /* 3352 * SDHCI recovers from errors by resetting the cmd and data 3353 * circuits. Until that is done, there very well might be more 3354 * interrupts, so ignore them in that case.
3355 */ 3356 if (host->pending_reset) 3357 return; 3358 3359 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 3360 mmc_hostname(host->mmc), (unsigned)intmask); 3361 sdhci_dumpregs(host); 3362 3363 return; 3364 } 3365 3366 if (intmask & SDHCI_INT_DATA_TIMEOUT) 3367 host->data->error = -ETIMEDOUT; 3368 else if (intmask & SDHCI_INT_DATA_END_BIT) 3369 host->data->error = -EILSEQ; 3370 else if ((intmask & SDHCI_INT_DATA_CRC) && 3371 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 3372 != MMC_BUS_TEST_R) 3373 host->data->error = -EILSEQ; 3374 else if (intmask & SDHCI_INT_ADMA_ERROR) { 3375 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), 3376 intmask); 3377 sdhci_adma_show_error(host); 3378 host->data->error = -EIO; 3379 if (host->ops->adma_workaround) 3380 host->ops->adma_workaround(host, intmask); 3381 } 3382 3383 if (host->data->error) 3384 sdhci_finish_data(host); 3385 else { 3386 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 3387 sdhci_transfer_pio(host); 3388 3389 /* 3390 * We currently don't do anything fancy with DMA 3391 * boundaries, but as we can't disable the feature 3392 * we need to at least restart the transfer. 3393 * 3394 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 3395 * should return a valid address to continue from, but as 3396 * some controllers are faulty, don't trust them. 3397 */ 3398 if (intmask & SDHCI_INT_DMA_END) { 3399 dma_addr_t dmastart, dmanow; 3400 3401 dmastart = sdhci_sdma_address(host); 3402 dmanow = dmastart + host->data->bytes_xfered; 3403 /* 3404 * Force update to the next DMA block boundary. 3405 */ 3406 dmanow = (dmanow & 3407 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 3408 SDHCI_DEFAULT_BOUNDARY_SIZE; 3409 host->data->bytes_xfered = dmanow - dmastart; 3410 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", 3411 &dmastart, host->data->bytes_xfered, &dmanow); 3412 sdhci_set_sdma_addr(host, dmanow); 3413 } 3414 3415 if (intmask & SDHCI_INT_DATA_END) { 3416 if (host->cmd == host->data_cmd) { 3417 /* 3418 * Data managed to finish before the 3419 * command completed. Make sure we do 3420 * things in the proper order. 3421 */ 3422 host->data_early = 1; 3423 } else { 3424 sdhci_finish_data(host); 3425 } 3426 } 3427 } 3428 } 3429 3430 static inline bool sdhci_defer_done(struct sdhci_host *host, 3431 struct mmc_request *mrq) 3432 { 3433 struct mmc_data *data = mrq->data; 3434 3435 return host->pending_reset || host->always_defer_done || 3436 ((host->flags & SDHCI_REQ_USE_DMA) && data && 3437 data->host_cookie == COOKIE_MAPPED); 3438 } 3439 3440 static irqreturn_t sdhci_irq(int irq, void *dev_id) 3441 { 3442 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0}; 3443 irqreturn_t result = IRQ_NONE; 3444 struct sdhci_host *host = dev_id; 3445 u32 intmask, mask, unexpected = 0; 3446 int max_loops = 16; 3447 int i; 3448 3449 spin_lock(&host->lock); 3450 3451 if (host->runtime_suspended) { 3452 spin_unlock(&host->lock); 3453 return IRQ_NONE; 3454 } 3455 3456 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3457 if (!intmask || intmask == 0xffffffff) { 3458 result = IRQ_NONE; 3459 goto out; 3460 } 3461 3462 do { 3463 DBG("IRQ status 0x%08x\n", intmask); 3464 3465 if (host->ops->irq) { 3466 intmask = host->ops->irq(host, intmask); 3467 if (!intmask) 3468 goto cont; 3469 } 3470 3471 /* Clear selected interrupts. 
*/ 3472 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3473 SDHCI_INT_BUS_POWER); 3474 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3475 3476 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3477 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 3478 SDHCI_CARD_PRESENT; 3479 3480 /* 3481 * There is an observation on i.MX esdhc: the INSERT 3482 * bit will be immediately set again when it gets 3483 * cleared, if a card is inserted. We have to mask 3484 * the irq to prevent an interrupt storm which would 3485 * freeze the system. The REMOVE bit behaves the 3486 * same way. 3487 * 3488 * More testing is needed here to ensure it works 3489 * for other platforms though. 3490 */ 3491 host->ier &= ~(SDHCI_INT_CARD_INSERT | 3492 SDHCI_INT_CARD_REMOVE); 3493 host->ier |= present ? SDHCI_INT_CARD_REMOVE : 3494 SDHCI_INT_CARD_INSERT; 3495 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3496 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3497 3498 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 3499 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 3500 3501 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 3502 SDHCI_INT_CARD_REMOVE); 3503 result = IRQ_WAKE_THREAD; 3504 } 3505 3506 if (intmask & SDHCI_INT_CMD_MASK) 3507 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask); 3508 3509 if (intmask & SDHCI_INT_DATA_MASK) 3510 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 3511 3512 if (intmask & SDHCI_INT_BUS_POWER) 3513 pr_err("%s: Card is consuming too much power!\n", 3514 mmc_hostname(host->mmc)); 3515 3516 if (intmask & SDHCI_INT_RETUNE) 3517 mmc_retune_needed(host->mmc); 3518 3519 if ((intmask & SDHCI_INT_CARD_INT) && 3520 (host->ier & SDHCI_INT_CARD_INT)) { 3521 sdhci_enable_sdio_irq_nolock(host, false); 3522 sdio_signal_irq(host->mmc); 3523 } 3524 3525 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 3526 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3527 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 3528 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); 3529 3530 if (intmask) { 3531 unexpected |= intmask; 3532 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3533 } 3534 cont: 3535 if (result == IRQ_NONE) 3536 result = IRQ_HANDLED; 3537 3538 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3539 } while (intmask && --max_loops); 3540 3541 /* Determine if mrqs can be completed immediately */ 3542 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3543 struct mmc_request *mrq = host->mrqs_done[i]; 3544 3545 if (!mrq) 3546 continue; 3547 3548 if (sdhci_defer_done(host, mrq)) { 3549 result = IRQ_WAKE_THREAD; 3550 } else { 3551 mrqs_done[i] = mrq; 3552 host->mrqs_done[i] = NULL; 3553 } 3554 } 3555 out: 3556 if (host->deferred_cmd) 3557 result = IRQ_WAKE_THREAD; 3558 3559 spin_unlock(&host->lock); 3560 3561 /* Process mrqs ready for immediate completion */ 3562 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3563 if (!mrqs_done[i]) 3564 continue; 3565 3566 if (host->ops->request_done) 3567 host->ops->request_done(host, mrqs_done[i]); 3568 else 3569 mmc_request_done(host->mmc, mrqs_done[i]); 3570 } 3571 3572 if (unexpected) { 3573 pr_err("%s: Unexpected interrupt 0x%08x.\n", 3574 mmc_hostname(host->mmc), unexpected); 3575 sdhci_dumpregs(host); 3576 } 3577 3578 return result; 3579 } 3580 3581 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3582 { 3583 struct sdhci_host *host = dev_id; 3584 struct mmc_command *cmd; 3585 unsigned long flags; 3586 u32 isr; 3587 3588 while (!sdhci_request_done(host)) 3589 ; 3590 3591 spin_lock_irqsave(&host->lock, flags); 3592 3593 isr =
host->thread_isr; 3594 host->thread_isr = 0; 3595 3596 cmd = host->deferred_cmd; 3597 if (cmd && !sdhci_send_command_retry(host, cmd, flags)) 3598 sdhci_finish_mrq(host, cmd->mrq); 3599 3600 spin_unlock_irqrestore(&host->lock, flags); 3601 3602 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3603 struct mmc_host *mmc = host->mmc; 3604 3605 mmc->ops->card_event(mmc); 3606 mmc_detect_change(mmc, msecs_to_jiffies(200)); 3607 } 3608 3609 return IRQ_HANDLED; 3610 } 3611 3612 /*****************************************************************************\ 3613 * * 3614 * Suspend/resume * 3615 * * 3616 \*****************************************************************************/ 3617 3618 #ifdef CONFIG_PM 3619 3620 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host) 3621 { 3622 return mmc_card_is_removable(host->mmc) && 3623 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3624 !mmc_can_gpio_cd(host->mmc); 3625 } 3626 3627 /* 3628 * To enable wakeup events, the corresponding events have to be enabled in 3629 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal 3630 * Table' in the SD Host Controller Standard Specification. 3631 * It is useless to restore SDHCI_INT_ENABLE state in 3632 * sdhci_disable_irq_wakeups() since it will be set by 3633 * sdhci_enable_card_detection() or sdhci_init(). 3634 */ 3635 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) 3636 { 3637 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | 3638 SDHCI_WAKE_ON_INT; 3639 u32 irq_val = 0; 3640 u8 wake_val = 0; 3641 u8 val; 3642 3643 if (sdhci_cd_irq_can_wakeup(host)) { 3644 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; 3645 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; 3646 } 3647 3648 if (mmc_card_wake_sdio_irq(host->mmc)) { 3649 wake_val |= SDHCI_WAKE_ON_INT; 3650 irq_val |= SDHCI_INT_CARD_INT; 3651 } 3652 3653 if (!irq_val) 3654 return false; 3655 3656 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3657 val &= ~mask; 3658 val |= wake_val; 3659 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3660 3661 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); 3662 3663 host->irq_wake_enabled = !enable_irq_wake(host->irq); 3664 3665 return host->irq_wake_enabled; 3666 } 3667 3668 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 3669 { 3670 u8 val; 3671 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 3672 | SDHCI_WAKE_ON_INT; 3673 3674 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3675 val &= ~mask; 3676 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3677 3678 disable_irq_wake(host->irq); 3679 3680 host->irq_wake_enabled = false; 3681 } 3682 3683 int sdhci_suspend_host(struct sdhci_host *host) 3684 { 3685 sdhci_disable_card_detection(host); 3686 3687 mmc_retune_timer_stop(host->mmc); 3688 3689 if (!device_may_wakeup(mmc_dev(host->mmc)) || 3690 !sdhci_enable_irq_wakeups(host)) { 3691 host->ier = 0; 3692 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3693 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3694 free_irq(host->irq, host); 3695 } 3696 3697 return 0; 3698 } 3699 3700 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 3701 3702 int sdhci_resume_host(struct sdhci_host *host) 3703 { 3704 struct mmc_host *mmc = host->mmc; 3705 int ret = 0; 3706 3707 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3708 if (host->ops->enable_dma) 3709 host->ops->enable_dma(host); 3710 } 3711 3712 if ((mmc->pm_flags & MMC_PM_KEEP_POWER) && 3713 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 3714 /* Card keeps power but host controller does not */ 3715 
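/* Re-initialise the controller and clear the cached power/clock state so that set_ios() re-programs them from scratch. */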
sdhci_init(host, 0); 3716 host->pwr = 0; 3717 host->clock = 0; 3718 mmc->ops->set_ios(mmc, &mmc->ios); 3719 } else { 3720 sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER)); 3721 } 3722 3723 if (host->irq_wake_enabled) { 3724 sdhci_disable_irq_wakeups(host); 3725 } else { 3726 ret = request_threaded_irq(host->irq, sdhci_irq, 3727 sdhci_thread_irq, IRQF_SHARED, 3728 mmc_hostname(mmc), host); 3729 if (ret) 3730 return ret; 3731 } 3732 3733 sdhci_enable_card_detection(host); 3734 3735 return ret; 3736 } 3737 3738 EXPORT_SYMBOL_GPL(sdhci_resume_host); 3739 3740 int sdhci_runtime_suspend_host(struct sdhci_host *host) 3741 { 3742 unsigned long flags; 3743 3744 mmc_retune_timer_stop(host->mmc); 3745 3746 spin_lock_irqsave(&host->lock, flags); 3747 host->ier &= SDHCI_INT_CARD_INT; 3748 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3749 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3750 spin_unlock_irqrestore(&host->lock, flags); 3751 3752 synchronize_hardirq(host->irq); 3753 3754 spin_lock_irqsave(&host->lock, flags); 3755 host->runtime_suspended = true; 3756 spin_unlock_irqrestore(&host->lock, flags); 3757 3758 return 0; 3759 } 3760 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 3761 3762 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) 3763 { 3764 struct mmc_host *mmc = host->mmc; 3765 unsigned long flags; 3766 int host_flags = host->flags; 3767 3768 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3769 if (host->ops->enable_dma) 3770 host->ops->enable_dma(host); 3771 } 3772 3773 sdhci_init(host, soft_reset); 3774 3775 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && 3776 mmc->ios.power_mode != MMC_POWER_OFF) { 3777 /* Force clock and power re-program */ 3778 host->pwr = 0; 3779 host->clock = 0; 3780 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); 3781 mmc->ops->set_ios(mmc, &mmc->ios); 3782 3783 if ((host_flags & SDHCI_PV_ENABLED) && 3784 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { 3785 spin_lock_irqsave(&host->lock, flags); 3786 sdhci_enable_preset_value(host, true); 3787 spin_unlock_irqrestore(&host->lock, flags); 3788 } 3789 3790 if ((mmc->caps2 & MMC_CAP2_HS400_ES) && 3791 mmc->ops->hs400_enhanced_strobe) 3792 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); 3793 } 3794 3795 spin_lock_irqsave(&host->lock, flags); 3796 3797 host->runtime_suspended = false; 3798 3799 /* Enable SDIO IRQ */ 3800 if (sdio_irq_claimed(mmc)) 3801 sdhci_enable_sdio_irq_nolock(host, true); 3802 3803 /* Enable Card Detection */ 3804 sdhci_enable_card_detection(host); 3805 3806 spin_unlock_irqrestore(&host->lock, flags); 3807 3808 return 0; 3809 } 3810 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 3811 3812 #endif /* CONFIG_PM */ 3813 3814 /*****************************************************************************\ 3815 * * 3816 * Command Queue Engine (CQE) helpers * 3817 * * 3818 \*****************************************************************************/ 3819 3820 void sdhci_cqe_enable(struct mmc_host *mmc) 3821 { 3822 struct sdhci_host *host = mmc_priv(mmc); 3823 unsigned long flags; 3824 u8 ctrl; 3825 3826 spin_lock_irqsave(&host->lock, flags); 3827 3828 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 3829 ctrl &= ~SDHCI_CTRL_DMA_MASK; 3830 /* 3831 * Hosts from v4.10 onwards support the ADMA3 DMA type. 3832 * ADMA3 uses integrated descriptors, which suit command queuing better 3833 * since both command and transfer descriptors can be fetched together.
3834 */ 3835 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) 3836 ctrl |= SDHCI_CTRL_ADMA3; 3837 else if (host->flags & SDHCI_USE_64_BIT_DMA) 3838 ctrl |= SDHCI_CTRL_ADMA64; 3839 else 3840 ctrl |= SDHCI_CTRL_ADMA32; 3841 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 3842 3843 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), 3844 SDHCI_BLOCK_SIZE); 3845 3846 /* Set maximum timeout */ 3847 sdhci_set_timeout(host, NULL); 3848 3849 host->ier = host->cqe_ier; 3850 3851 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3852 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3853 3854 host->cqe_on = true; 3855 3856 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", 3857 mmc_hostname(mmc), host->ier, 3858 sdhci_readl(host, SDHCI_INT_STATUS)); 3859 3860 spin_unlock_irqrestore(&host->lock, flags); 3861 } 3862 EXPORT_SYMBOL_GPL(sdhci_cqe_enable); 3863 3864 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) 3865 { 3866 struct sdhci_host *host = mmc_priv(mmc); 3867 unsigned long flags; 3868 3869 spin_lock_irqsave(&host->lock, flags); 3870 3871 sdhci_set_default_irqs(host); 3872 3873 host->cqe_on = false; 3874 3875 if (recovery) { 3876 sdhci_do_reset(host, SDHCI_RESET_CMD); 3877 sdhci_do_reset(host, SDHCI_RESET_DATA); 3878 } 3879 3880 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", 3881 mmc_hostname(mmc), host->ier, 3882 sdhci_readl(host, SDHCI_INT_STATUS)); 3883 3884 spin_unlock_irqrestore(&host->lock, flags); 3885 } 3886 EXPORT_SYMBOL_GPL(sdhci_cqe_disable); 3887 3888 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, 3889 int *data_error) 3890 { 3891 u32 mask; 3892 3893 if (!host->cqe_on) 3894 return false; 3895 3896 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) 3897 *cmd_error = -EILSEQ; 3898 else if (intmask & SDHCI_INT_TIMEOUT) 3899 *cmd_error = -ETIMEDOUT; 3900 else 3901 *cmd_error = 0; 3902 3903 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) 3904 *data_error = -EILSEQ; 3905 else if (intmask & SDHCI_INT_DATA_TIMEOUT) 3906 *data_error = -ETIMEDOUT; 3907 else if (intmask & SDHCI_INT_ADMA_ERROR) 3908 *data_error = -EIO; 3909 else 3910 *data_error = 0; 3911 3912 /* Clear selected interrupts. 
*/ 3913 mask = intmask & host->cqe_ier; 3914 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3915 3916 if (intmask & SDHCI_INT_BUS_POWER) 3917 pr_err("%s: Card is consuming too much power!\n", 3918 mmc_hostname(host->mmc)); 3919 3920 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR); 3921 if (intmask) { 3922 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3923 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n", 3924 mmc_hostname(host->mmc), intmask); 3925 sdhci_dumpregs(host); 3926 } 3927 3928 return true; 3929 } 3930 EXPORT_SYMBOL_GPL(sdhci_cqe_irq); 3931 3932 /*****************************************************************************\ 3933 * * 3934 * Device allocation/registration * 3935 * * 3936 \*****************************************************************************/ 3937 3938 struct sdhci_host *sdhci_alloc_host(struct device *dev, 3939 size_t priv_size) 3940 { 3941 struct mmc_host *mmc; 3942 struct sdhci_host *host; 3943 3944 WARN_ON(dev == NULL); 3945 3946 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 3947 if (!mmc) 3948 return ERR_PTR(-ENOMEM); 3949 3950 host = mmc_priv(mmc); 3951 host->mmc = mmc; 3952 host->mmc_host_ops = sdhci_ops; 3953 mmc->ops = &host->mmc_host_ops; 3954 3955 host->flags = SDHCI_SIGNALING_330; 3956 3957 host->cqe_ier = SDHCI_CQE_INT_MASK; 3958 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK; 3959 3960 host->tuning_delay = -1; 3961 host->tuning_loop_count = MAX_TUNING_LOOP; 3962 3963 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; 3964 3965 /* 3966 * The DMA table descriptor count is calculated as the maximum 3967 * number of segments times 2, to allow for an alignment 3968 * descriptor for each segment, plus 1 for a nop end descriptor. 3969 */ 3970 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; 3971 3972 host->max_timeout_count = 0xE; 3973 3974 return host; 3975 } 3976 3977 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 3978 3979 static int sdhci_set_dma_mask(struct sdhci_host *host) 3980 { 3981 struct mmc_host *mmc = host->mmc; 3982 struct device *dev = mmc_dev(mmc); 3983 int ret = -EINVAL; 3984 3985 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 3986 host->flags &= ~SDHCI_USE_64_BIT_DMA; 3987 3988 /* Try 64-bit mask if hardware is capable of it */ 3989 if (host->flags & SDHCI_USE_64_BIT_DMA) { 3990 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 3991 if (ret) { 3992 pr_warn("%s: Failed to set 64-bit DMA mask.\n", 3993 mmc_hostname(mmc)); 3994 host->flags &= ~SDHCI_USE_64_BIT_DMA; 3995 } 3996 } 3997 3998 /* 32-bit mask as default & fallback */ 3999 if (ret) { 4000 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 4001 if (ret) 4002 pr_warn("%s: Failed to set 32-bit DMA mask.\n", 4003 mmc_hostname(mmc)); 4004 } 4005 4006 return ret; 4007 } 4008 4009 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver, 4010 const u32 *caps, const u32 *caps1) 4011 { 4012 u16 v; 4013 u64 dt_caps_mask = 0; 4014 u64 dt_caps = 0; 4015 4016 if (host->read_caps) 4017 return; 4018 4019 host->read_caps = true; 4020 4021 if (debug_quirks) 4022 host->quirks = debug_quirks; 4023 4024 if (debug_quirks2) 4025 host->quirks2 = debug_quirks2; 4026 4027 sdhci_do_reset(host, SDHCI_RESET_ALL); 4028 4029 if (host->v4_mode) 4030 sdhci_do_enable_v4_mode(host); 4031 4032 device_property_read_u64(mmc_dev(host->mmc), 4033 "sdhci-caps-mask", &dt_caps_mask); 4034 device_property_read_u64(mmc_dev(host->mmc), 4035 "sdhci-caps", &dt_caps); 4036 4037 v = ver ? 
*ver : sdhci_readw(host, SDHCI_HOST_VERSION); 4038 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; 4039 4040 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS) 4041 return; 4042 4043 if (caps) { 4044 host->caps = *caps; 4045 } else { 4046 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); 4047 host->caps &= ~lower_32_bits(dt_caps_mask); 4048 host->caps |= lower_32_bits(dt_caps); 4049 } 4050 4051 if (host->version < SDHCI_SPEC_300) 4052 return; 4053 4054 if (caps1) { 4055 host->caps1 = *caps1; 4056 } else { 4057 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); 4058 host->caps1 &= ~upper_32_bits(dt_caps_mask); 4059 host->caps1 |= upper_32_bits(dt_caps); 4060 } 4061 } 4062 EXPORT_SYMBOL_GPL(__sdhci_read_caps); 4063 4064 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host) 4065 { 4066 struct mmc_host *mmc = host->mmc; 4067 unsigned int max_blocks; 4068 unsigned int bounce_size; 4069 int ret; 4070 4071 /* 4072 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer 4073 * has diminishing returns, this is probably because SD/MMC 4074 * cards are usually optimized to handle this size of requests. 4075 */ 4076 bounce_size = SZ_64K; 4077 /* 4078 * Adjust downwards to maximum request size if this is less 4079 * than our segment size, else hammer down the maximum 4080 * request size to the maximum buffer size. 4081 */ 4082 if (mmc->max_req_size < bounce_size) 4083 bounce_size = mmc->max_req_size; 4084 max_blocks = bounce_size / 512; 4085 4086 /* 4087 * When we just support one segment, we can get significant 4088 * speedups by the help of a bounce buffer to group scattered 4089 * reads/writes together. 4090 */ 4091 host->bounce_buffer = devm_kmalloc(mmc_dev(mmc), 4092 bounce_size, 4093 GFP_KERNEL); 4094 if (!host->bounce_buffer) { 4095 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", 4096 mmc_hostname(mmc), 4097 bounce_size); 4098 /* 4099 * Exiting with zero here makes sure we proceed with 4100 * mmc->max_segs == 1. 4101 */ 4102 return; 4103 } 4104 4105 host->bounce_addr = dma_map_single(mmc_dev(mmc), 4106 host->bounce_buffer, 4107 bounce_size, 4108 DMA_BIDIRECTIONAL); 4109 ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr); 4110 if (ret) { 4111 devm_kfree(mmc_dev(mmc), host->bounce_buffer); 4112 host->bounce_buffer = NULL; 4113 /* Again fall back to max_segs == 1 */ 4114 return; 4115 } 4116 4117 host->bounce_buffer_size = bounce_size; 4118 4119 /* Lie about this since we're bouncing */ 4120 mmc->max_segs = max_blocks; 4121 mmc->max_seg_size = bounce_size; 4122 mmc->max_req_size = bounce_size; 4123 4124 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n", 4125 mmc_hostname(mmc), max_blocks, bounce_size); 4126 } 4127 4128 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host) 4129 { 4130 /* 4131 * According to SD Host Controller spec v4.10, bit[27] added from 4132 * version 4.10 in Capabilities Register is used as 64-bit System 4133 * Address support for V4 mode. 
static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
{
        /*
         * According to the SD Host Controller spec v4.10, bit[27] of the
         * Capabilities register (added in version 4.10) indicates 64-bit
         * System Address support for V4 mode.
         */
        if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
                return host->caps & SDHCI_CAN_64BIT_V4;

        return host->caps & SDHCI_CAN_64BIT;
}

int sdhci_setup_host(struct sdhci_host *host)
{
        struct mmc_host *mmc;
        u32 max_current_caps;
        unsigned int ocr_avail;
        unsigned int override_timeout_clk;
        u32 max_clk;
        int ret = 0;
        bool enable_vqmmc = false;

        WARN_ON(host == NULL);
        if (host == NULL)
                return -EINVAL;

        mmc = host->mmc;

        /*
         * If there are external regulators, get them. Note this must be done
         * early before resetting the host and reading the capabilities so
         * that the host can take the appropriate action if regulators are
         * not available.
         */
        if (!mmc->supply.vqmmc) {
                ret = mmc_regulator_get_supply(mmc);
                if (ret)
                        return ret;
                enable_vqmmc = true;
        }

        DBG("Version: 0x%08x | Present: 0x%08x\n",
            sdhci_readw(host, SDHCI_HOST_VERSION),
            sdhci_readl(host, SDHCI_PRESENT_STATE));
        DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
            sdhci_readl(host, SDHCI_CAPABILITIES),
            sdhci_readl(host, SDHCI_CAPABILITIES_1));

        sdhci_read_caps(host);

        override_timeout_clk = host->timeout_clk;

        if (host->version > SDHCI_SPEC_420) {
                pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
                       mmc_hostname(mmc), host->version);
        }

        if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
                host->flags |= SDHCI_USE_SDMA;
        else if (!(host->caps & SDHCI_CAN_DO_SDMA))
                DBG("Controller doesn't have SDMA capability\n");
        else
                host->flags |= SDHCI_USE_SDMA;

        if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
            (host->flags & SDHCI_USE_SDMA)) {
                DBG("Disabling DMA as it is marked broken\n");
                host->flags &= ~SDHCI_USE_SDMA;
        }

        if ((host->version >= SDHCI_SPEC_200) &&
            (host->caps & SDHCI_CAN_DO_ADMA2))
                host->flags |= SDHCI_USE_ADMA;

        if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
            (host->flags & SDHCI_USE_ADMA)) {
                DBG("Disabling ADMA as it is marked broken\n");
                host->flags &= ~SDHCI_USE_ADMA;
        }

        if (sdhci_can_64bit_dma(host))
                host->flags |= SDHCI_USE_64_BIT_DMA;

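        /*
         * At this point the DMA mode has been chosen purely from
         * capabilities and quirks; it may still be dropped below if an
         * external DMA channel is used, the DMA mask cannot be set, or
         * the ADMA descriptor allocation fails.
         */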
        if (host->use_external_dma) {
                ret = sdhci_external_dma_init(host);
                if (ret == -EPROBE_DEFER)
                        goto unreg;
                /*
                 * Fall back to the DMA/PIO engine integrated in the
                 * standard SDHCI controller instead of an external DMA
                 * device.
                 */
                else if (ret)
                        sdhci_switch_external_dma(host, false);
                /* Disable internal DMA sources */
                else
                        host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
        }

        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                if (host->ops->set_dma_mask)
                        ret = host->ops->set_dma_mask(host);
                else
                        ret = sdhci_set_dma_mask(host);

                if (!ret && host->ops->enable_dma)
                        ret = host->ops->enable_dma(host);

                if (ret) {
                        pr_warn("%s: No suitable DMA available - falling back to PIO\n",
                                mmc_hostname(mmc));
                        host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

                        ret = 0;
                }
        }

        /* SDMA does not support 64-bit DMA if v4 mode not set */
        if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
                host->flags &= ~SDHCI_USE_SDMA;

        if (host->flags & SDHCI_USE_ADMA) {
                dma_addr_t dma;
                void *buf;

                if (!(host->flags & SDHCI_USE_64_BIT_DMA))
                        host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
                else if (!host->alloc_desc_sz)
                        host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);

                host->desc_sz = host->alloc_desc_sz;
                host->adma_table_sz = host->adma_table_cnt * host->desc_sz;

                host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
                /*
                 * The coherent allocation below returns zeroed memory, so
                 * the reserved high 32 bits of 128-bit descriptors never
                 * need to be written.
                 */
                buf = dma_alloc_coherent(mmc_dev(mmc),
                                         host->align_buffer_sz + host->adma_table_sz,
                                         &dma, GFP_KERNEL);
                if (!buf) {
                        pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
                                mmc_hostname(mmc));
                        host->flags &= ~SDHCI_USE_ADMA;
                } else if ((dma + host->align_buffer_sz) &
                           (SDHCI_ADMA2_DESC_ALIGN - 1)) {
                        pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
                                mmc_hostname(mmc));
                        host->flags &= ~SDHCI_USE_ADMA;
                        dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
                                          host->adma_table_sz, buf, dma);
                } else {
                        host->align_buffer = buf;
                        host->align_addr = dma;

                        host->adma_table = buf + host->align_buffer_sz;
                        host->adma_addr = dma + host->align_buffer_sz;
                }
        }

        /*
         * If we use DMA, then it's up to the caller to set the DMA
         * mask, but PIO does not need the hw shim so we set a new
         * mask here in that case.
         */
        if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
                host->dma_mask = DMA_BIT_MASK(64);
                mmc_dev(mmc)->dma_mask = &host->dma_mask;
        }

        if (host->version >= SDHCI_SPEC_300)
                host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
        else
                host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);

        host->max_clk *= 1000000;
        if (host->max_clk == 0 || host->quirks &
            SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
                if (!host->ops->get_max_clock) {
                        pr_err("%s: Hardware doesn't specify base clock frequency.\n",
                               mmc_hostname(mmc));
                        ret = -ENODEV;
                        goto undma;
                }
                host->max_clk = host->ops->get_max_clock(host);
        }

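        /*
         * The base clock field in the capabilities register is expressed in
         * MHz, hence the multiplication by 1000000 above; e.g. a field value
         * of 200 corresponds to a 200 MHz base clock.
         */
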
        /*
         * In case of Host Controller v3.00, find out whether clock
         * multiplier is supported.
         */
        host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);

        /*
         * In case the value in the Clock Multiplier field is 0, then
         * programmable clock mode is not supported, otherwise the actual
         * clock multiplier is one more than the value of Clock Multiplier
         * in the Capabilities Register.
         */
        if (host->clk_mul)
                host->clk_mul += 1;

        /*
         * Set host parameters.
         */
        max_clk = host->max_clk;

        if (host->ops->get_min_clock)
                mmc->f_min = host->ops->get_min_clock(host);
        else if (host->version >= SDHCI_SPEC_300) {
                if (host->clk_mul)
                        max_clk = host->max_clk * host->clk_mul;
                /*
                 * Divided Clock Mode minimum clock rate is always less than
                 * Programmable Clock Mode minimum clock rate.
                 */
                mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
        } else
                mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

        if (!mmc->f_max || mmc->f_max > max_clk)
                mmc->f_max = max_clk;

        if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
                host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);

                if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
                        host->timeout_clk *= 1000;

                if (host->timeout_clk == 0) {
                        if (!host->ops->get_timeout_clock) {
                                pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
                                       mmc_hostname(mmc));
                                ret = -ENODEV;
                                goto undma;
                        }

                        host->timeout_clk =
                                DIV_ROUND_UP(host->ops->get_timeout_clock(host),
                                             1000);
                }

                if (override_timeout_clk)
                        host->timeout_clk = override_timeout_clk;

                mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
                        host->ops->get_max_timeout_count(host) : 1 << 27;
                mmc->max_busy_timeout /= host->timeout_clk;
        }

        if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
            !host->ops->get_max_timeout_count)
                mmc->max_busy_timeout = 0;

        mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
        mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

        if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
                host->flags |= SDHCI_AUTO_CMD12;

        /*
         * For v3 mode, Auto-CMD23 only works in ADMA or PIO.
         * For v4 mode, SDMA may use Auto-CMD23 as well.
         */
        if ((host->version >= SDHCI_SPEC_300) &&
            ((host->flags & SDHCI_USE_ADMA) ||
             !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
            !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
                host->flags |= SDHCI_AUTO_CMD23;
                DBG("Auto-CMD23 available\n");
        } else {
                DBG("Auto-CMD23 unavailable\n");
        }

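        /*
         * Auto-CMD23 lets the controller issue SET_BLOCK_COUNT (CMD23)
         * itself before a multi-block transfer, which is why it is only
         * advertised when the transfer path in use (ADMA, PIO, or SDMA in
         * v4 mode) can make use of it and no quirk marks it broken.
         */
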
        /*
         * A controller may support 8-bit width, but the board itself
         * might not have the pins brought out. Boards that support
         * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
         * their platform code before calling sdhci_add_host(), and we
         * won't assume 8-bit width for hosts without that CAP.
         */
        if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
                mmc->caps |= MMC_CAP_4_BIT_DATA;

        if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
                mmc->caps &= ~MMC_CAP_CMD23;

        if (host->caps & SDHCI_CAN_DO_HISPD)
                mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

        if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
            mmc_card_is_removable(mmc) &&
            mmc_gpio_get_cd(mmc) < 0)
                mmc->caps |= MMC_CAP_NEEDS_POLL;

        if (!IS_ERR(mmc->supply.vqmmc)) {
                if (enable_vqmmc) {
                        ret = regulator_enable(mmc->supply.vqmmc);
                        host->sdhci_core_to_disable_vqmmc = !ret;
                }

                /* If vqmmc provides no 1.8V signalling, then there's no UHS */
                if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
                                                    1950000))
                        host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
                                         SDHCI_SUPPORT_SDR50 |
                                         SDHCI_SUPPORT_DDR50);

                /* In the eMMC case vqmmc might be a fixed 1.8V regulator */
                if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
                                                    3600000))
                        host->flags &= ~SDHCI_SIGNALING_330;

                if (ret) {
                        pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
                                mmc_hostname(mmc), ret);
                        mmc->supply.vqmmc = ERR_PTR(-EINVAL);
                }

        }

        if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
                host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
                                 SDHCI_SUPPORT_DDR50);
                /*
                 * The SDHCI controller in a SoC might support HS200/HS400
                 * (indicated by the mmc-hs200-1_8v/mmc-hs400-1_8v DT
                 * properties), but if the board is modeled such that the IO
                 * lines are not connected to 1.8V then HS200/HS400 cannot be
                 * supported. Disable HS200/HS400 if the board does not have
                 * 1.8V connected to the IO lines. (This also applies to the
                 * other modes that require 1.8V signalling.)
                 */
                mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
                mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
        }

        /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
        if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
                           SDHCI_SUPPORT_DDR50))
                mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

        /* SDR104 support also implies SDR50 support */
        if (host->caps1 & SDHCI_SUPPORT_SDR104) {
                mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
                /*
                 * SD3.0: SDR104 is supported so (for eMMC) the caps2
                 * field can be promoted to support HS200.
                 */
                if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
                        mmc->caps2 |= MMC_CAP2_HS200;
        } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
                mmc->caps |= MMC_CAP_UHS_SDR50;
        }

        if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
            (host->caps1 & SDHCI_SUPPORT_HS400))
                mmc->caps2 |= MMC_CAP2_HS400;

        if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
            (IS_ERR(mmc->supply.vqmmc) ||
             !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
                                             1300000)))
                mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

        if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
            !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
                mmc->caps |= MMC_CAP_UHS_DDR50;

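        /*
         * The remaining Capabilities_1 fields describe tuning requirements,
         * the supported driver strengths and the re-tuning timer/mode; they
         * are parsed below.
         */
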
        /* Does the host need tuning for SDR50? */
        if (host->caps1 & SDHCI_USE_SDR50_TUNING)
                host->flags |= SDHCI_SDR50_NEEDS_TUNING;

        /* Driver Type(s) (A, C, D) supported by the host */
        if (host->caps1 & SDHCI_DRIVER_TYPE_A)
                mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
        if (host->caps1 & SDHCI_DRIVER_TYPE_C)
                mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
        if (host->caps1 & SDHCI_DRIVER_TYPE_D)
                mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

        /* Initial value for re-tuning timer count */
        host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
                                       host->caps1);

        /*
         * In case Re-tuning Timer is not disabled, the actual value of
         * re-tuning timer will be 2 ^ (n - 1).
         */
        if (host->tuning_count)
                host->tuning_count = 1 << (host->tuning_count - 1);

        /* Re-tuning mode supported by the Host Controller */
        host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);

        ocr_avail = 0;

        /*
         * According to SD Host Controller spec v3.00, if the Host System
         * can afford more than 150mA, Host Driver should set XPC to 1. Also
         * the value is meaningful only if Voltage Support in the Capabilities
         * register is set. The actual current value is 4 times the register
         * value.
         */
        max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
        if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
                int curr = regulator_get_current_limit(mmc->supply.vmmc);
                if (curr > 0) {

                        /* convert to SDHCI_MAX_CURRENT format */
                        curr = curr / 1000;  /* convert to mA */
                        curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

                        curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
                        max_current_caps =
                                FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
                                FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
                                FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
                }
        }

        if (host->caps & SDHCI_CAN_VDD_330) {
                ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

                mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
                                                 max_current_caps) *
                                       SDHCI_MAX_CURRENT_MULTIPLIER;
        }
        if (host->caps & SDHCI_CAN_VDD_300) {
                ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

                mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
                                                 max_current_caps) *
                                       SDHCI_MAX_CURRENT_MULTIPLIER;
        }
        if (host->caps & SDHCI_CAN_VDD_180) {
                ocr_avail |= MMC_VDD_165_195;

                mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
                                                 max_current_caps) *
                                       SDHCI_MAX_CURRENT_MULTIPLIER;
        }

        /* If OCR set by host, use it instead. */
        if (host->ocr_mask)
                ocr_avail = host->ocr_mask;

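        /*
         * Note the precedence: host->ocr_mask above overrides the
         * capability-derived mask, and an OCR mask coming from an external
         * regulator (mmc->ocr_avail) overrides both below.
         */
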
        /* If the OCR is set by external regulators, give it the highest priority. */
        if (mmc->ocr_avail)
                ocr_avail = mmc->ocr_avail;

        mmc->ocr_avail = ocr_avail;
        mmc->ocr_avail_sdio = ocr_avail;
        if (host->ocr_avail_sdio)
                mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
        mmc->ocr_avail_sd = ocr_avail;
        if (host->ocr_avail_sd)
                mmc->ocr_avail_sd &= host->ocr_avail_sd;
        else /* normal SD controllers don't support 1.8V */
                mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
        mmc->ocr_avail_mmc = ocr_avail;
        if (host->ocr_avail_mmc)
                mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

        if (mmc->ocr_avail == 0) {
                pr_err("%s: Hardware doesn't report any supported voltages.\n",
                       mmc_hostname(mmc));
                ret = -ENODEV;
                goto unreg;
        }

        if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
                          MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
                          MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
            (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
                host->flags |= SDHCI_SIGNALING_180;

        if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
                host->flags |= SDHCI_SIGNALING_120;

        spin_lock_init(&host->lock);

        /*
         * Maximum number of sectors in one transfer. Limited by SDMA boundary
         * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
         * is less anyway.
         */
        mmc->max_req_size = 524288;

        /*
         * Maximum number of segments. Depends on if the hardware
         * can do scatter/gather or not.
         */
        if (host->flags & SDHCI_USE_ADMA) {
                mmc->max_segs = SDHCI_MAX_SEGS;
        } else if (host->flags & SDHCI_USE_SDMA) {
                mmc->max_segs = 1;
                mmc->max_req_size = min_t(size_t, mmc->max_req_size,
                                          dma_max_mapping_size(mmc_dev(mmc)));
        } else { /* PIO */
                mmc->max_segs = SDHCI_MAX_SEGS;
        }

        /*
         * Maximum segment size. Could be one segment with the maximum number
         * of bytes. When doing hardware scatter/gather, each entry cannot
         * be larger than 64 KiB though.
         */
        if (host->flags & SDHCI_USE_ADMA) {
                if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
                        mmc->max_seg_size = 65535;
                else
                        mmc->max_seg_size = 65536;
        } else {
                mmc->max_seg_size = mmc->max_req_size;
        }

        /*
         * Maximum block size. This varies from controller to controller and
         * is specified in the capabilities register.
         */
        if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
                mmc->max_blk_size = 2;
        } else {
                mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
                                    SDHCI_MAX_BLOCK_SHIFT;
                if (mmc->max_blk_size >= 3) {
                        pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
                                mmc_hostname(mmc));
                        mmc->max_blk_size = 0;
                }
        }

        mmc->max_blk_size = 512 << mmc->max_blk_size;

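        /*
         * I.e. the capabilities field encodes the maximum block size as
         * 512 << n, so a field value of 2 corresponds to 2048 bytes.
         */
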
        /*
         * Maximum block count.
         */
        mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ?
                             1 : 65535;

        if (mmc->max_segs == 1)
                /* This may alter mmc->*_blk_* parameters */
                sdhci_allocate_bounce_buffer(host);

        return 0;

unreg:
        if (host->sdhci_core_to_disable_vqmmc)
                regulator_disable(mmc->supply.vqmmc);
undma:
        if (host->align_buffer)
                dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
                                  host->adma_table_sz, host->align_buffer,
                                  host->align_addr);
        host->adma_table = NULL;
        host->align_buffer = NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

void sdhci_cleanup_host(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;

        if (host->sdhci_core_to_disable_vqmmc)
                regulator_disable(mmc->supply.vqmmc);

        if (host->align_buffer)
                dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
                                  host->adma_table_sz, host->align_buffer,
                                  host->align_addr);

        if (host->use_external_dma)
                sdhci_external_dma_release(host);

        host->adma_table = NULL;
        host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);

int __sdhci_add_host(struct sdhci_host *host)
{
        unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
        struct mmc_host *mmc = host->mmc;
        int ret;

        if ((mmc->caps2 & MMC_CAP2_CQE) &&
            (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
                mmc->caps2 &= ~MMC_CAP2_CQE;
                mmc->cqe_ops = NULL;
        }

        host->complete_wq = alloc_workqueue("sdhci", flags, 0);
        if (!host->complete_wq)
                return -ENOMEM;

        INIT_WORK(&host->complete_work, sdhci_complete_work);

        timer_setup(&host->timer, sdhci_timeout_timer, 0);
        timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

        init_waitqueue_head(&host->buf_ready_int);

        sdhci_init(host, 0);

        ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
                                   IRQF_SHARED, mmc_hostname(mmc), host);
        if (ret) {
                pr_err("%s: Failed to request IRQ %d: %d\n",
                       mmc_hostname(mmc), host->irq, ret);
                goto unwq;
        }

        ret = sdhci_led_register(host);
        if (ret) {
                pr_err("%s: Failed to register LED device: %d\n",
                       mmc_hostname(mmc), ret);
                goto unirq;
        }

        ret = mmc_add_host(mmc);
        if (ret)
                goto unled;

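        /*
         * mmc_add_host() was the last step that can fail: from here on the
         * controller is visible to the MMC core. The error labels below
         * unwind the earlier steps in reverse order.
         */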
"DMA" : "PIO"); 4758 4759 sdhci_enable_card_detection(host); 4760 4761 return 0; 4762 4763 unled: 4764 sdhci_led_unregister(host); 4765 unirq: 4766 sdhci_do_reset(host, SDHCI_RESET_ALL); 4767 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4768 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4769 free_irq(host->irq, host); 4770 unwq: 4771 destroy_workqueue(host->complete_wq); 4772 4773 return ret; 4774 } 4775 EXPORT_SYMBOL_GPL(__sdhci_add_host); 4776 4777 int sdhci_add_host(struct sdhci_host *host) 4778 { 4779 int ret; 4780 4781 ret = sdhci_setup_host(host); 4782 if (ret) 4783 return ret; 4784 4785 ret = __sdhci_add_host(host); 4786 if (ret) 4787 goto cleanup; 4788 4789 return 0; 4790 4791 cleanup: 4792 sdhci_cleanup_host(host); 4793 4794 return ret; 4795 } 4796 EXPORT_SYMBOL_GPL(sdhci_add_host); 4797 4798 void sdhci_remove_host(struct sdhci_host *host, int dead) 4799 { 4800 struct mmc_host *mmc = host->mmc; 4801 unsigned long flags; 4802 4803 if (dead) { 4804 spin_lock_irqsave(&host->lock, flags); 4805 4806 host->flags |= SDHCI_DEVICE_DEAD; 4807 4808 if (sdhci_has_requests(host)) { 4809 pr_err("%s: Controller removed during " 4810 " transfer!\n", mmc_hostname(mmc)); 4811 sdhci_error_out_mrqs(host, -ENOMEDIUM); 4812 } 4813 4814 spin_unlock_irqrestore(&host->lock, flags); 4815 } 4816 4817 sdhci_disable_card_detection(host); 4818 4819 mmc_remove_host(mmc); 4820 4821 sdhci_led_unregister(host); 4822 4823 if (!dead) 4824 sdhci_do_reset(host, SDHCI_RESET_ALL); 4825 4826 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4827 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4828 free_irq(host->irq, host); 4829 4830 del_timer_sync(&host->timer); 4831 del_timer_sync(&host->data_timer); 4832 4833 destroy_workqueue(host->complete_wq); 4834 4835 if (host->sdhci_core_to_disable_vqmmc) 4836 regulator_disable(mmc->supply.vqmmc); 4837 4838 if (host->align_buffer) 4839 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4840 host->adma_table_sz, host->align_buffer, 4841 host->align_addr); 4842 4843 if (host->use_external_dma) 4844 sdhci_external_dma_release(host); 4845 4846 host->adma_table = NULL; 4847 host->align_buffer = NULL; 4848 } 4849 4850 EXPORT_SYMBOL_GPL(sdhci_remove_host); 4851 4852 void sdhci_free_host(struct sdhci_host *host) 4853 { 4854 mmc_free_host(host->mmc); 4855 } 4856 4857 EXPORT_SYMBOL_GPL(sdhci_free_host); 4858 4859 /*****************************************************************************\ 4860 * * 4861 * Driver init/exit * 4862 * * 4863 \*****************************************************************************/ 4864 4865 static int __init sdhci_drv_init(void) 4866 { 4867 pr_info(DRIVER_NAME 4868 ": Secure Digital Host Controller Interface driver\n"); 4869 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 4870 4871 return 0; 4872 } 4873 4874 static void __exit sdhci_drv_exit(void) 4875 { 4876 } 4877 4878 module_init(sdhci_drv_init); 4879 module_exit(sdhci_drv_exit); 4880 4881 module_param(debug_quirks, uint, 0444); 4882 module_param(debug_quirks2, uint, 0444); 4883 4884 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 4885 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 4886 MODULE_LICENSE("GPL"); 4887 4888 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 4889 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 4890