1 /* 2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver 3 * 4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or (at 9 * your option) any later version. 10 * 11 * Thanks to the following companies for their support: 12 * 13 * - JMicron (hardware and technical support) 14 */ 15 16 #include <linux/delay.h> 17 #include <linux/ktime.h> 18 #include <linux/highmem.h> 19 #include <linux/io.h> 20 #include <linux/module.h> 21 #include <linux/dma-mapping.h> 22 #include <linux/slab.h> 23 #include <linux/scatterlist.h> 24 #include <linux/sizes.h> 25 #include <linux/swiotlb.h> 26 #include <linux/regulator/consumer.h> 27 #include <linux/pm_runtime.h> 28 #include <linux/of.h> 29 30 #include <linux/leds.h> 31 32 #include <linux/mmc/mmc.h> 33 #include <linux/mmc/host.h> 34 #include <linux/mmc/card.h> 35 #include <linux/mmc/sdio.h> 36 #include <linux/mmc/slot-gpio.h> 37 38 #include "sdhci.h" 39 40 #define DRIVER_NAME "sdhci" 41 42 #define DBG(f, x...) \ 43 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 44 45 #define SDHCI_DUMP(f, x...) \ 46 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 47 48 #define MAX_TUNING_LOOP 40 49 50 static unsigned int debug_quirks = 0; 51 static unsigned int debug_quirks2; 52 53 static void sdhci_finish_data(struct sdhci_host *); 54 55 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); 56 57 void sdhci_dumpregs(struct sdhci_host *host) 58 { 59 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n"); 60 61 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n", 62 sdhci_readl(host, SDHCI_DMA_ADDRESS), 63 sdhci_readw(host, SDHCI_HOST_VERSION)); 64 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n", 65 sdhci_readw(host, SDHCI_BLOCK_SIZE), 66 sdhci_readw(host, SDHCI_BLOCK_COUNT)); 67 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n", 68 sdhci_readl(host, SDHCI_ARGUMENT), 69 sdhci_readw(host, SDHCI_TRANSFER_MODE)); 70 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n", 71 sdhci_readl(host, SDHCI_PRESENT_STATE), 72 sdhci_readb(host, SDHCI_HOST_CONTROL)); 73 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n", 74 sdhci_readb(host, SDHCI_POWER_CONTROL), 75 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); 76 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n", 77 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), 78 sdhci_readw(host, SDHCI_CLOCK_CONTROL)); 79 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n", 80 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), 81 sdhci_readl(host, SDHCI_INT_STATUS)); 82 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n", 83 sdhci_readl(host, SDHCI_INT_ENABLE), 84 sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); 85 SDHCI_DUMP("AC12 err: 0x%08x | Slot int: 0x%08x\n", 86 sdhci_readw(host, SDHCI_ACMD12_ERR), 87 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 88 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n", 89 sdhci_readl(host, SDHCI_CAPABILITIES), 90 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 91 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n", 92 sdhci_readw(host, SDHCI_COMMAND), 93 sdhci_readl(host, SDHCI_MAX_CURRENT)); 94 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n", 95 sdhci_readl(host, SDHCI_RESPONSE), 96 sdhci_readl(host, SDHCI_RESPONSE + 4)); 97 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n", 98 sdhci_readl(host, SDHCI_RESPONSE + 8), 99 sdhci_readl(host, SDHCI_RESPONSE + 
		   12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
	u16 ctrl2;

	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ctrl2 & SDHCI_CTRL_V4_MODE)
		return;

	ctrl2 |= SDHCI_CTRL_V4_MODE;
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}

/*
 * This can be called before sdhci_add_host() by a vendor's host controller
 * driver to enable v4 mode if supported.
 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
			  SDHCI_CARD_PRESENT;

		host->ier |= present ?
SDHCI_INT_CARD_REMOVE : 167 SDHCI_INT_CARD_INSERT; 168 } else { 169 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 170 } 171 172 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 173 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 174 } 175 176 static void sdhci_enable_card_detection(struct sdhci_host *host) 177 { 178 sdhci_set_card_detection(host, true); 179 } 180 181 static void sdhci_disable_card_detection(struct sdhci_host *host) 182 { 183 sdhci_set_card_detection(host, false); 184 } 185 186 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) 187 { 188 if (host->bus_on) 189 return; 190 host->bus_on = true; 191 pm_runtime_get_noresume(host->mmc->parent); 192 } 193 194 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) 195 { 196 if (!host->bus_on) 197 return; 198 host->bus_on = false; 199 pm_runtime_put_noidle(host->mmc->parent); 200 } 201 202 void sdhci_reset(struct sdhci_host *host, u8 mask) 203 { 204 ktime_t timeout; 205 206 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 207 208 if (mask & SDHCI_RESET_ALL) { 209 host->clock = 0; 210 /* Reset-all turns off SD Bus Power */ 211 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 212 sdhci_runtime_pm_bus_off(host); 213 } 214 215 /* Wait max 100 ms */ 216 timeout = ktime_add_ms(ktime_get(), 100); 217 218 /* hw clears the bit when it's done */ 219 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) { 220 if (ktime_after(ktime_get(), timeout)) { 221 pr_err("%s: Reset 0x%x never completed.\n", 222 mmc_hostname(host->mmc), (int)mask); 223 sdhci_dumpregs(host); 224 return; 225 } 226 udelay(10); 227 } 228 } 229 EXPORT_SYMBOL_GPL(sdhci_reset); 230 231 static void sdhci_do_reset(struct sdhci_host *host, u8 mask) 232 { 233 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 234 struct mmc_host *mmc = host->mmc; 235 236 if (!mmc->ops->get_cd(mmc)) 237 return; 238 } 239 240 host->ops->reset(host, mask); 241 242 if (mask & SDHCI_RESET_ALL) { 243 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 244 if (host->ops->enable_dma) 245 host->ops->enable_dma(host); 246 } 247 248 /* Resetting the controller clears many */ 249 host->preset_enabled = false; 250 } 251 } 252 253 static void sdhci_set_default_irqs(struct sdhci_host *host) 254 { 255 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 256 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | 257 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | 258 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | 259 SDHCI_INT_RESPONSE; 260 261 if (host->tuning_mode == SDHCI_TUNING_MODE_2 || 262 host->tuning_mode == SDHCI_TUNING_MODE_3) 263 host->ier |= SDHCI_INT_RETUNE; 264 265 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 266 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 267 } 268 269 static void sdhci_config_dma(struct sdhci_host *host) 270 { 271 u8 ctrl; 272 u16 ctrl2; 273 274 if (host->version < SDHCI_SPEC_200) 275 return; 276 277 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 278 279 /* 280 * Always adjust the DMA selection as some controllers 281 * (e.g. JMicron) can't do PIO properly when the selection 282 * is ADMA. 
283 */ 284 ctrl &= ~SDHCI_CTRL_DMA_MASK; 285 if (!(host->flags & SDHCI_REQ_USE_DMA)) 286 goto out; 287 288 /* Note if DMA Select is zero then SDMA is selected */ 289 if (host->flags & SDHCI_USE_ADMA) 290 ctrl |= SDHCI_CTRL_ADMA32; 291 292 if (host->flags & SDHCI_USE_64_BIT_DMA) { 293 /* 294 * If v4 mode, all supported DMA can be 64-bit addressing if 295 * controller supports 64-bit system address, otherwise only 296 * ADMA can support 64-bit addressing. 297 */ 298 if (host->v4_mode) { 299 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 300 ctrl2 |= SDHCI_CTRL_64BIT_ADDR; 301 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 302 } else if (host->flags & SDHCI_USE_ADMA) { 303 /* 304 * Don't need to undo SDHCI_CTRL_ADMA32 in order to 305 * set SDHCI_CTRL_ADMA64. 306 */ 307 ctrl |= SDHCI_CTRL_ADMA64; 308 } 309 } 310 311 out: 312 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 313 } 314 315 static void sdhci_init(struct sdhci_host *host, int soft) 316 { 317 struct mmc_host *mmc = host->mmc; 318 319 if (soft) 320 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 321 else 322 sdhci_do_reset(host, SDHCI_RESET_ALL); 323 324 if (host->v4_mode) 325 sdhci_do_enable_v4_mode(host); 326 327 sdhci_set_default_irqs(host); 328 329 host->cqe_on = false; 330 331 if (soft) { 332 /* force clock reconfiguration */ 333 host->clock = 0; 334 mmc->ops->set_ios(mmc, &mmc->ios); 335 } 336 } 337 338 static void sdhci_reinit(struct sdhci_host *host) 339 { 340 sdhci_init(host, 0); 341 sdhci_enable_card_detection(host); 342 } 343 344 static void __sdhci_led_activate(struct sdhci_host *host) 345 { 346 u8 ctrl; 347 348 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 349 ctrl |= SDHCI_CTRL_LED; 350 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 351 } 352 353 static void __sdhci_led_deactivate(struct sdhci_host *host) 354 { 355 u8 ctrl; 356 357 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 358 ctrl &= ~SDHCI_CTRL_LED; 359 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 360 } 361 362 #if IS_REACHABLE(CONFIG_LEDS_CLASS) 363 static void sdhci_led_control(struct led_classdev *led, 364 enum led_brightness brightness) 365 { 366 struct sdhci_host *host = container_of(led, struct sdhci_host, led); 367 unsigned long flags; 368 369 spin_lock_irqsave(&host->lock, flags); 370 371 if (host->runtime_suspended) 372 goto out; 373 374 if (brightness == LED_OFF) 375 __sdhci_led_deactivate(host); 376 else 377 __sdhci_led_activate(host); 378 out: 379 spin_unlock_irqrestore(&host->lock, flags); 380 } 381 382 static int sdhci_led_register(struct sdhci_host *host) 383 { 384 struct mmc_host *mmc = host->mmc; 385 386 snprintf(host->led_name, sizeof(host->led_name), 387 "%s::", mmc_hostname(mmc)); 388 389 host->led.name = host->led_name; 390 host->led.brightness = LED_OFF; 391 host->led.default_trigger = mmc_hostname(mmc); 392 host->led.brightness_set = sdhci_led_control; 393 394 return led_classdev_register(mmc_dev(mmc), &host->led); 395 } 396 397 static void sdhci_led_unregister(struct sdhci_host *host) 398 { 399 led_classdev_unregister(&host->led); 400 } 401 402 static inline void sdhci_led_activate(struct sdhci_host *host) 403 { 404 } 405 406 static inline void sdhci_led_deactivate(struct sdhci_host *host) 407 { 408 } 409 410 #else 411 412 static inline int sdhci_led_register(struct sdhci_host *host) 413 { 414 return 0; 415 } 416 417 static inline void sdhci_led_unregister(struct sdhci_host *host) 418 { 419 } 420 421 static inline void sdhci_led_activate(struct sdhci_host *host) 422 { 423 __sdhci_led_activate(host); 424 } 425 426 static inline void 
sdhci_led_deactivate(struct sdhci_host *host) 427 { 428 __sdhci_led_deactivate(host); 429 } 430 431 #endif 432 433 /*****************************************************************************\ 434 * * 435 * Core functions * 436 * * 437 \*****************************************************************************/ 438 439 static void sdhci_read_block_pio(struct sdhci_host *host) 440 { 441 unsigned long flags; 442 size_t blksize, len, chunk; 443 u32 uninitialized_var(scratch); 444 u8 *buf; 445 446 DBG("PIO reading\n"); 447 448 blksize = host->data->blksz; 449 chunk = 0; 450 451 local_irq_save(flags); 452 453 while (blksize) { 454 BUG_ON(!sg_miter_next(&host->sg_miter)); 455 456 len = min(host->sg_miter.length, blksize); 457 458 blksize -= len; 459 host->sg_miter.consumed = len; 460 461 buf = host->sg_miter.addr; 462 463 while (len) { 464 if (chunk == 0) { 465 scratch = sdhci_readl(host, SDHCI_BUFFER); 466 chunk = 4; 467 } 468 469 *buf = scratch & 0xFF; 470 471 buf++; 472 scratch >>= 8; 473 chunk--; 474 len--; 475 } 476 } 477 478 sg_miter_stop(&host->sg_miter); 479 480 local_irq_restore(flags); 481 } 482 483 static void sdhci_write_block_pio(struct sdhci_host *host) 484 { 485 unsigned long flags; 486 size_t blksize, len, chunk; 487 u32 scratch; 488 u8 *buf; 489 490 DBG("PIO writing\n"); 491 492 blksize = host->data->blksz; 493 chunk = 0; 494 scratch = 0; 495 496 local_irq_save(flags); 497 498 while (blksize) { 499 BUG_ON(!sg_miter_next(&host->sg_miter)); 500 501 len = min(host->sg_miter.length, blksize); 502 503 blksize -= len; 504 host->sg_miter.consumed = len; 505 506 buf = host->sg_miter.addr; 507 508 while (len) { 509 scratch |= (u32)*buf << (chunk * 8); 510 511 buf++; 512 chunk++; 513 len--; 514 515 if ((chunk == 4) || ((len == 0) && (blksize == 0))) { 516 sdhci_writel(host, scratch, SDHCI_BUFFER); 517 chunk = 0; 518 scratch = 0; 519 } 520 } 521 } 522 523 sg_miter_stop(&host->sg_miter); 524 525 local_irq_restore(flags); 526 } 527 528 static void sdhci_transfer_pio(struct sdhci_host *host) 529 { 530 u32 mask; 531 532 if (host->blocks == 0) 533 return; 534 535 if (host->data->flags & MMC_DATA_READ) 536 mask = SDHCI_DATA_AVAILABLE; 537 else 538 mask = SDHCI_SPACE_AVAILABLE; 539 540 /* 541 * Some controllers (JMicron JMB38x) mess up the buffer bits 542 * for transfers < 4 bytes. As long as it is just one block, 543 * we can ignore the bits. 544 */ 545 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && 546 (host->data->blocks == 1)) 547 mask = ~0; 548 549 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 550 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) 551 udelay(100); 552 553 if (host->data->flags & MMC_DATA_READ) 554 sdhci_read_block_pio(host); 555 else 556 sdhci_write_block_pio(host); 557 558 host->blocks--; 559 if (host->blocks == 0) 560 break; 561 } 562 563 DBG("PIO transfer complete.\n"); 564 } 565 566 static int sdhci_pre_dma_transfer(struct sdhci_host *host, 567 struct mmc_data *data, int cookie) 568 { 569 int sg_count; 570 571 /* 572 * If the data buffers are already mapped, return the previous 573 * dma_map_sg() result. 
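	 * (COOKIE_PRE_MAPPED is set by the ->pre_req() hook, sdhci_pre_req(),
	 * when the mmc core pre-maps a request ahead of time.)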
574 */ 575 if (data->host_cookie == COOKIE_PRE_MAPPED) 576 return data->sg_count; 577 578 /* Bounce write requests to the bounce buffer */ 579 if (host->bounce_buffer) { 580 unsigned int length = data->blksz * data->blocks; 581 582 if (length > host->bounce_buffer_size) { 583 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n", 584 mmc_hostname(host->mmc), length, 585 host->bounce_buffer_size); 586 return -EIO; 587 } 588 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { 589 /* Copy the data to the bounce buffer */ 590 sg_copy_to_buffer(data->sg, data->sg_len, 591 host->bounce_buffer, 592 length); 593 } 594 /* Switch ownership to the DMA */ 595 dma_sync_single_for_device(host->mmc->parent, 596 host->bounce_addr, 597 host->bounce_buffer_size, 598 mmc_get_dma_dir(data)); 599 /* Just a dummy value */ 600 sg_count = 1; 601 } else { 602 /* Just access the data directly from memory */ 603 sg_count = dma_map_sg(mmc_dev(host->mmc), 604 data->sg, data->sg_len, 605 mmc_get_dma_dir(data)); 606 } 607 608 if (sg_count == 0) 609 return -ENOSPC; 610 611 data->sg_count = sg_count; 612 data->host_cookie = cookie; 613 614 return sg_count; 615 } 616 617 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) 618 { 619 local_irq_save(*flags); 620 return kmap_atomic(sg_page(sg)) + sg->offset; 621 } 622 623 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) 624 { 625 kunmap_atomic(buffer); 626 local_irq_restore(*flags); 627 } 628 629 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, 630 dma_addr_t addr, int len, unsigned int cmd) 631 { 632 struct sdhci_adma2_64_desc *dma_desc = *desc; 633 634 /* 32-bit and 64-bit descriptors have these members in same position */ 635 dma_desc->cmd = cpu_to_le16(cmd); 636 dma_desc->len = cpu_to_le16(len); 637 dma_desc->addr_lo = cpu_to_le32((u32)addr); 638 639 if (host->flags & SDHCI_USE_64_BIT_DMA) 640 dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32); 641 642 *desc += host->desc_sz; 643 } 644 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc); 645 646 static inline void __sdhci_adma_write_desc(struct sdhci_host *host, 647 void **desc, dma_addr_t addr, 648 int len, unsigned int cmd) 649 { 650 if (host->ops->adma_write_desc) 651 host->ops->adma_write_desc(host, desc, addr, len, cmd); 652 else 653 sdhci_adma_write_desc(host, desc, addr, len, cmd); 654 } 655 656 static void sdhci_adma_mark_end(void *desc) 657 { 658 struct sdhci_adma2_64_desc *dma_desc = desc; 659 660 /* 32-bit and 64-bit descriptors have 'cmd' in same position */ 661 dma_desc->cmd |= cpu_to_le16(ADMA2_END); 662 } 663 664 static void sdhci_adma_table_pre(struct sdhci_host *host, 665 struct mmc_data *data, int sg_count) 666 { 667 struct scatterlist *sg; 668 unsigned long flags; 669 dma_addr_t addr, align_addr; 670 void *desc, *align; 671 char *buffer; 672 int len, offset, i; 673 674 /* 675 * The spec does not specify endianness of descriptor table. 676 * We currently guess that it is LE. 677 */ 678 679 host->sg_count = sg_count; 680 681 desc = host->adma_table; 682 align = host->align_buffer; 683 684 align_addr = host->align_addr; 685 686 for_each_sg(data->sg, sg, host->sg_count, i) { 687 addr = sg_dma_address(sg); 688 len = sg_dma_len(sg); 689 690 /* 691 * The SDHCI specification states that ADMA addresses must 692 * be 32-bit aligned. If they aren't, then we use a bounce 693 * buffer for the (up to three) bytes that screw up the 694 * alignment. 
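		 * (Those leading bytes are staged in the pre-allocated
		 * host->align_buffer / align_addr used below.)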
695 */ 696 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) & 697 SDHCI_ADMA2_MASK; 698 if (offset) { 699 if (data->flags & MMC_DATA_WRITE) { 700 buffer = sdhci_kmap_atomic(sg, &flags); 701 memcpy(align, buffer, offset); 702 sdhci_kunmap_atomic(buffer, &flags); 703 } 704 705 /* tran, valid */ 706 __sdhci_adma_write_desc(host, &desc, align_addr, 707 offset, ADMA2_TRAN_VALID); 708 709 BUG_ON(offset > 65536); 710 711 align += SDHCI_ADMA2_ALIGN; 712 align_addr += SDHCI_ADMA2_ALIGN; 713 714 addr += offset; 715 len -= offset; 716 } 717 718 BUG_ON(len > 65536); 719 720 /* tran, valid */ 721 if (len) 722 __sdhci_adma_write_desc(host, &desc, addr, len, 723 ADMA2_TRAN_VALID); 724 725 /* 726 * If this triggers then we have a calculation bug 727 * somewhere. :/ 728 */ 729 WARN_ON((desc - host->adma_table) >= host->adma_table_sz); 730 } 731 732 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { 733 /* Mark the last descriptor as the terminating descriptor */ 734 if (desc != host->adma_table) { 735 desc -= host->desc_sz; 736 sdhci_adma_mark_end(desc); 737 } 738 } else { 739 /* Add a terminating entry - nop, end, valid */ 740 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID); 741 } 742 } 743 744 static void sdhci_adma_table_post(struct sdhci_host *host, 745 struct mmc_data *data) 746 { 747 struct scatterlist *sg; 748 int i, size; 749 void *align; 750 char *buffer; 751 unsigned long flags; 752 753 if (data->flags & MMC_DATA_READ) { 754 bool has_unaligned = false; 755 756 /* Do a quick scan of the SG list for any unaligned mappings */ 757 for_each_sg(data->sg, sg, host->sg_count, i) 758 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 759 has_unaligned = true; 760 break; 761 } 762 763 if (has_unaligned) { 764 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, 765 data->sg_len, DMA_FROM_DEVICE); 766 767 align = host->align_buffer; 768 769 for_each_sg(data->sg, sg, host->sg_count, i) { 770 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 771 size = SDHCI_ADMA2_ALIGN - 772 (sg_dma_address(sg) & SDHCI_ADMA2_MASK); 773 774 buffer = sdhci_kmap_atomic(sg, &flags); 775 memcpy(buffer, align, size); 776 sdhci_kunmap_atomic(buffer, &flags); 777 778 align += SDHCI_ADMA2_ALIGN; 779 } 780 } 781 } 782 } 783 } 784 785 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host) 786 { 787 if (host->bounce_buffer) 788 return host->bounce_addr; 789 else 790 return sg_dma_address(host->data->sg); 791 } 792 793 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr) 794 { 795 if (host->v4_mode) { 796 sdhci_writel(host, addr, SDHCI_ADMA_ADDRESS); 797 if (host->flags & SDHCI_USE_64_BIT_DMA) 798 sdhci_writel(host, (u64)addr >> 32, SDHCI_ADMA_ADDRESS_HI); 799 } else { 800 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS); 801 } 802 } 803 804 static unsigned int sdhci_target_timeout(struct sdhci_host *host, 805 struct mmc_command *cmd, 806 struct mmc_data *data) 807 { 808 unsigned int target_timeout; 809 810 /* timeout in us */ 811 if (!data) { 812 target_timeout = cmd->busy_timeout * 1000; 813 } else { 814 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000); 815 if (host->clock && data->timeout_clks) { 816 unsigned long long val; 817 818 /* 819 * data->timeout_clks is in units of clock cycles. 820 * host->clock is in Hz. target_timeout is in us. 821 * Hence, us = 1000000 * cycles / Hz. Round up. 
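			 * For example, 100 timeout clocks at a 50 MHz host
			 * clock add 1000000 * 100 / 50000000 = 2 us here.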
822 */ 823 val = 1000000ULL * data->timeout_clks; 824 if (do_div(val, host->clock)) 825 target_timeout++; 826 target_timeout += val; 827 } 828 } 829 830 return target_timeout; 831 } 832 833 static void sdhci_calc_sw_timeout(struct sdhci_host *host, 834 struct mmc_command *cmd) 835 { 836 struct mmc_data *data = cmd->data; 837 struct mmc_host *mmc = host->mmc; 838 struct mmc_ios *ios = &mmc->ios; 839 unsigned char bus_width = 1 << ios->bus_width; 840 unsigned int blksz; 841 unsigned int freq; 842 u64 target_timeout; 843 u64 transfer_time; 844 845 target_timeout = sdhci_target_timeout(host, cmd, data); 846 target_timeout *= NSEC_PER_USEC; 847 848 if (data) { 849 blksz = data->blksz; 850 freq = host->mmc->actual_clock ? : host->clock; 851 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width); 852 do_div(transfer_time, freq); 853 /* multiply by '2' to account for any unknowns */ 854 transfer_time = transfer_time * 2; 855 /* calculate timeout for the entire data */ 856 host->data_timeout = data->blocks * target_timeout + 857 transfer_time; 858 } else { 859 host->data_timeout = target_timeout; 860 } 861 862 if (host->data_timeout) 863 host->data_timeout += MMC_CMD_TRANSFER_TIME; 864 } 865 866 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd, 867 bool *too_big) 868 { 869 u8 count; 870 struct mmc_data *data = cmd->data; 871 unsigned target_timeout, current_timeout; 872 873 *too_big = true; 874 875 /* 876 * If the host controller provides us with an incorrect timeout 877 * value, just skip the check and use 0xE. The hardware may take 878 * longer to time out, but that's much better than having a too-short 879 * timeout value. 880 */ 881 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) 882 return 0xE; 883 884 /* Unspecified timeout, assume max */ 885 if (!data && !cmd->busy_timeout) 886 return 0xE; 887 888 /* timeout in us */ 889 target_timeout = sdhci_target_timeout(host, cmd, data); 890 891 /* 892 * Figure out needed cycles. 893 * We do this in steps in order to fit inside a 32 bit int. 
894 * The first step is the minimum timeout, which will have a 895 * minimum resolution of 6 bits: 896 * (1) 2^13*1000 > 2^22, 897 * (2) host->timeout_clk < 2^16 898 * => 899 * (1) / (2) > 2^6 900 */ 901 count = 0; 902 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 903 while (current_timeout < target_timeout) { 904 count++; 905 current_timeout <<= 1; 906 if (count >= 0xF) 907 break; 908 } 909 910 if (count >= 0xF) { 911 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT)) 912 DBG("Too large timeout 0x%x requested for CMD%d!\n", 913 count, cmd->opcode); 914 count = 0xE; 915 } else { 916 *too_big = false; 917 } 918 919 return count; 920 } 921 922 static void sdhci_set_transfer_irqs(struct sdhci_host *host) 923 { 924 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; 925 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; 926 927 if (host->flags & SDHCI_REQ_USE_DMA) 928 host->ier = (host->ier & ~pio_irqs) | dma_irqs; 929 else 930 host->ier = (host->ier & ~dma_irqs) | pio_irqs; 931 932 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 933 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 934 } 935 936 static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) 937 { 938 if (enable) 939 host->ier |= SDHCI_INT_DATA_TIMEOUT; 940 else 941 host->ier &= ~SDHCI_INT_DATA_TIMEOUT; 942 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 943 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 944 } 945 946 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 947 { 948 u8 count; 949 950 if (host->ops->set_timeout) { 951 host->ops->set_timeout(host, cmd); 952 } else { 953 bool too_big = false; 954 955 count = sdhci_calc_timeout(host, cmd, &too_big); 956 957 if (too_big && 958 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { 959 sdhci_calc_sw_timeout(host, cmd); 960 sdhci_set_data_timeout_irq(host, false); 961 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { 962 sdhci_set_data_timeout_irq(host, true); 963 } 964 965 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); 966 } 967 } 968 969 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) 970 { 971 struct mmc_data *data = cmd->data; 972 973 host->data_timeout = 0; 974 975 if (sdhci_data_line_cmd(cmd)) 976 sdhci_set_timeout(host, cmd); 977 978 if (!data) 979 return; 980 981 WARN_ON(host->data); 982 983 /* Sanity checks */ 984 BUG_ON(data->blksz * data->blocks > 524288); 985 BUG_ON(data->blksz > host->mmc->max_blk_size); 986 BUG_ON(data->blocks > 65535); 987 988 host->data = data; 989 host->data_early = 0; 990 host->data->bytes_xfered = 0; 991 992 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 993 struct scatterlist *sg; 994 unsigned int length_mask, offset_mask; 995 int i; 996 997 host->flags |= SDHCI_REQ_USE_DMA; 998 999 /* 1000 * FIXME: This doesn't account for merging when mapping the 1001 * scatterlist. 1002 * 1003 * The assumption here being that alignment and lengths are 1004 * the same after DMA mapping to device address space. 1005 */ 1006 length_mask = 0; 1007 offset_mask = 0; 1008 if (host->flags & SDHCI_USE_ADMA) { 1009 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) { 1010 length_mask = 3; 1011 /* 1012 * As we use up to 3 byte chunks to work 1013 * around alignment problems, we need to 1014 * check the offset as well. 
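				 * (sdhci_adma_table_pre() bounces up to 3
				 * leading bytes through host->align_buffer
				 * when an sg entry is not 4-byte aligned.)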
1015 */ 1016 offset_mask = 3; 1017 } 1018 } else { 1019 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) 1020 length_mask = 3; 1021 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) 1022 offset_mask = 3; 1023 } 1024 1025 if (unlikely(length_mask | offset_mask)) { 1026 for_each_sg(data->sg, sg, data->sg_len, i) { 1027 if (sg->length & length_mask) { 1028 DBG("Reverting to PIO because of transfer size (%d)\n", 1029 sg->length); 1030 host->flags &= ~SDHCI_REQ_USE_DMA; 1031 break; 1032 } 1033 if (sg->offset & offset_mask) { 1034 DBG("Reverting to PIO because of bad alignment\n"); 1035 host->flags &= ~SDHCI_REQ_USE_DMA; 1036 break; 1037 } 1038 } 1039 } 1040 } 1041 1042 if (host->flags & SDHCI_REQ_USE_DMA) { 1043 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1044 1045 if (sg_cnt <= 0) { 1046 /* 1047 * This only happens when someone fed 1048 * us an invalid request. 1049 */ 1050 WARN_ON(1); 1051 host->flags &= ~SDHCI_REQ_USE_DMA; 1052 } else if (host->flags & SDHCI_USE_ADMA) { 1053 sdhci_adma_table_pre(host, data, sg_cnt); 1054 1055 sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS); 1056 if (host->flags & SDHCI_USE_64_BIT_DMA) 1057 sdhci_writel(host, 1058 (u64)host->adma_addr >> 32, 1059 SDHCI_ADMA_ADDRESS_HI); 1060 } else { 1061 WARN_ON(sg_cnt != 1); 1062 sdhci_set_sdma_addr(host, sdhci_sdma_address(host)); 1063 } 1064 } 1065 1066 sdhci_config_dma(host); 1067 1068 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 1069 int flags; 1070 1071 flags = SG_MITER_ATOMIC; 1072 if (host->data->flags & MMC_DATA_READ) 1073 flags |= SG_MITER_TO_SG; 1074 else 1075 flags |= SG_MITER_FROM_SG; 1076 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1077 host->blocks = data->blocks; 1078 } 1079 1080 sdhci_set_transfer_irqs(host); 1081 1082 /* Set the DMA boundary value and block size */ 1083 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), 1084 SDHCI_BLOCK_SIZE); 1085 1086 /* 1087 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count 1088 * can be supported, in that case 16-bit block count register must be 0. 1089 */ 1090 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1091 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { 1092 if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) 1093 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); 1094 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); 1095 } else { 1096 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); 1097 } 1098 } 1099 1100 static inline bool sdhci_auto_cmd12(struct sdhci_host *host, 1101 struct mmc_request *mrq) 1102 { 1103 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) && 1104 !mrq->cap_cmd_during_tfr; 1105 } 1106 1107 static inline void sdhci_auto_cmd_select(struct sdhci_host *host, 1108 struct mmc_command *cmd, 1109 u16 *mode) 1110 { 1111 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) && 1112 (cmd->opcode != SD_IO_RW_EXTENDED); 1113 bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23); 1114 u16 ctrl2; 1115 1116 /* 1117 * In case of Version 4.10 or later, use of 'Auto CMD Auto 1118 * Select' is recommended rather than use of 'Auto CMD12 1119 * Enable' or 'Auto CMD23 Enable'. 
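	 * (With Auto CMD Auto Select the controller chooses between CMD12
	 * and CMD23 itself, based on the CMD23 Enable bit programmed below.)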
1120 */ 1121 if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) { 1122 *mode |= SDHCI_TRNS_AUTO_SEL; 1123 1124 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1125 if (use_cmd23) 1126 ctrl2 |= SDHCI_CMD23_ENABLE; 1127 else 1128 ctrl2 &= ~SDHCI_CMD23_ENABLE; 1129 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 1130 1131 return; 1132 } 1133 1134 /* 1135 * If we are sending CMD23, CMD12 never gets sent 1136 * on successful completion (so no Auto-CMD12). 1137 */ 1138 if (use_cmd12) 1139 *mode |= SDHCI_TRNS_AUTO_CMD12; 1140 else if (use_cmd23) 1141 *mode |= SDHCI_TRNS_AUTO_CMD23; 1142 } 1143 1144 static void sdhci_set_transfer_mode(struct sdhci_host *host, 1145 struct mmc_command *cmd) 1146 { 1147 u16 mode = 0; 1148 struct mmc_data *data = cmd->data; 1149 1150 if (data == NULL) { 1151 if (host->quirks2 & 1152 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) { 1153 /* must not clear SDHCI_TRANSFER_MODE when tuning */ 1154 if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) 1155 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE); 1156 } else { 1157 /* clear Auto CMD settings for no data CMDs */ 1158 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); 1159 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 | 1160 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE); 1161 } 1162 return; 1163 } 1164 1165 WARN_ON(!host->data); 1166 1167 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE)) 1168 mode = SDHCI_TRNS_BLK_CNT_EN; 1169 1170 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) { 1171 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI; 1172 sdhci_auto_cmd_select(host, cmd, &mode); 1173 if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) 1174 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2); 1175 } 1176 1177 if (data->flags & MMC_DATA_READ) 1178 mode |= SDHCI_TRNS_READ; 1179 if (host->flags & SDHCI_REQ_USE_DMA) 1180 mode |= SDHCI_TRNS_DMA; 1181 1182 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); 1183 } 1184 1185 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) 1186 { 1187 return (!(host->flags & SDHCI_DEVICE_DEAD) && 1188 ((mrq->cmd && mrq->cmd->error) || 1189 (mrq->sbc && mrq->sbc->error) || 1190 (mrq->data && ((mrq->data->error && !mrq->data->stop) || 1191 (mrq->data->stop && mrq->data->stop->error))) || 1192 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); 1193 } 1194 1195 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1196 { 1197 int i; 1198 1199 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1200 if (host->mrqs_done[i] == mrq) { 1201 WARN_ON(1); 1202 return; 1203 } 1204 } 1205 1206 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1207 if (!host->mrqs_done[i]) { 1208 host->mrqs_done[i] = mrq; 1209 break; 1210 } 1211 } 1212 1213 WARN_ON(i >= SDHCI_MAX_MRQS); 1214 1215 tasklet_schedule(&host->finish_tasklet); 1216 } 1217 1218 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1219 { 1220 if (host->cmd && host->cmd->mrq == mrq) 1221 host->cmd = NULL; 1222 1223 if (host->data_cmd && host->data_cmd->mrq == mrq) 1224 host->data_cmd = NULL; 1225 1226 if (host->data && host->data->mrq == mrq) 1227 host->data = NULL; 1228 1229 if (sdhci_needs_reset(host, mrq)) 1230 host->pending_reset = true; 1231 1232 __sdhci_finish_mrq(host, mrq); 1233 } 1234 1235 static void sdhci_finish_data(struct sdhci_host *host) 1236 { 1237 struct mmc_command *data_cmd = host->data_cmd; 1238 struct mmc_data *data = host->data; 1239 1240 host->data = NULL; 1241 host->data_cmd = NULL; 1242 1243 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) == 
1244 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) 1245 sdhci_adma_table_post(host, data); 1246 1247 /* 1248 * The specification states that the block count register must 1249 * be updated, but it does not specify at what point in the 1250 * data flow. That makes the register entirely useless to read 1251 * back so we have to assume that nothing made it to the card 1252 * in the event of an error. 1253 */ 1254 if (data->error) 1255 data->bytes_xfered = 0; 1256 else 1257 data->bytes_xfered = data->blksz * data->blocks; 1258 1259 /* 1260 * Need to send CMD12 if - 1261 * a) open-ended multiblock transfer (no CMD23) 1262 * b) error in multiblock transfer 1263 */ 1264 if (data->stop && 1265 (data->error || 1266 !data->mrq->sbc)) { 1267 1268 /* 1269 * The controller needs a reset of internal state machines 1270 * upon error conditions. 1271 */ 1272 if (data->error) { 1273 if (!host->cmd || host->cmd == data_cmd) 1274 sdhci_do_reset(host, SDHCI_RESET_CMD); 1275 sdhci_do_reset(host, SDHCI_RESET_DATA); 1276 } 1277 1278 /* 1279 * 'cap_cmd_during_tfr' request must not use the command line 1280 * after mmc_command_done() has been called. It is upper layer's 1281 * responsibility to send the stop command if required. 1282 */ 1283 if (data->mrq->cap_cmd_during_tfr) { 1284 sdhci_finish_mrq(host, data->mrq); 1285 } else { 1286 /* Avoid triggering warning in sdhci_send_command() */ 1287 host->cmd = NULL; 1288 sdhci_send_command(host, data->stop); 1289 } 1290 } else { 1291 sdhci_finish_mrq(host, data->mrq); 1292 } 1293 } 1294 1295 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq, 1296 unsigned long timeout) 1297 { 1298 if (sdhci_data_line_cmd(mrq->cmd)) 1299 mod_timer(&host->data_timer, timeout); 1300 else 1301 mod_timer(&host->timer, timeout); 1302 } 1303 1304 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq) 1305 { 1306 if (sdhci_data_line_cmd(mrq->cmd)) 1307 del_timer(&host->data_timer); 1308 else 1309 del_timer(&host->timer); 1310 } 1311 1312 void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) 1313 { 1314 int flags; 1315 u32 mask; 1316 unsigned long timeout; 1317 1318 WARN_ON(host->cmd); 1319 1320 /* Initially, a command has no error */ 1321 cmd->error = 0; 1322 1323 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && 1324 cmd->opcode == MMC_STOP_TRANSMISSION) 1325 cmd->flags |= MMC_RSP_BUSY; 1326 1327 /* Wait max 10 ms */ 1328 timeout = 10; 1329 1330 mask = SDHCI_CMD_INHIBIT; 1331 if (sdhci_data_line_cmd(cmd)) 1332 mask |= SDHCI_DATA_INHIBIT; 1333 1334 /* We shouldn't wait for data inihibit for stop commands, even 1335 though they might use busy signaling */ 1336 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop)) 1337 mask &= ~SDHCI_DATA_INHIBIT; 1338 1339 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 1340 if (timeout == 0) { 1341 pr_err("%s: Controller never released inhibit bit(s).\n", 1342 mmc_hostname(host->mmc)); 1343 sdhci_dumpregs(host); 1344 cmd->error = -EIO; 1345 sdhci_finish_mrq(host, cmd->mrq); 1346 return; 1347 } 1348 timeout--; 1349 mdelay(1); 1350 } 1351 1352 host->cmd = cmd; 1353 if (sdhci_data_line_cmd(cmd)) { 1354 WARN_ON(host->data_cmd); 1355 host->data_cmd = cmd; 1356 } 1357 1358 sdhci_prepare_data(host, cmd); 1359 1360 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); 1361 1362 sdhci_set_transfer_mode(host, cmd); 1363 1364 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 1365 pr_err("%s: Unsupported response type!\n", 1366 mmc_hostname(host->mmc)); 1367 cmd->error = -EINVAL; 1368 
sdhci_finish_mrq(host, cmd->mrq); 1369 return; 1370 } 1371 1372 if (!(cmd->flags & MMC_RSP_PRESENT)) 1373 flags = SDHCI_CMD_RESP_NONE; 1374 else if (cmd->flags & MMC_RSP_136) 1375 flags = SDHCI_CMD_RESP_LONG; 1376 else if (cmd->flags & MMC_RSP_BUSY) 1377 flags = SDHCI_CMD_RESP_SHORT_BUSY; 1378 else 1379 flags = SDHCI_CMD_RESP_SHORT; 1380 1381 if (cmd->flags & MMC_RSP_CRC) 1382 flags |= SDHCI_CMD_CRC; 1383 if (cmd->flags & MMC_RSP_OPCODE) 1384 flags |= SDHCI_CMD_INDEX; 1385 1386 /* CMD19 is special in that the Data Present Select should be set */ 1387 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK || 1388 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200) 1389 flags |= SDHCI_CMD_DATA; 1390 1391 timeout = jiffies; 1392 if (host->data_timeout) 1393 timeout += nsecs_to_jiffies(host->data_timeout); 1394 else if (!cmd->data && cmd->busy_timeout > 9000) 1395 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ; 1396 else 1397 timeout += 10 * HZ; 1398 sdhci_mod_timer(host, cmd->mrq, timeout); 1399 1400 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); 1401 } 1402 EXPORT_SYMBOL_GPL(sdhci_send_command); 1403 1404 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd) 1405 { 1406 int i, reg; 1407 1408 for (i = 0; i < 4; i++) { 1409 reg = SDHCI_RESPONSE + (3 - i) * 4; 1410 cmd->resp[i] = sdhci_readl(host, reg); 1411 } 1412 1413 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC) 1414 return; 1415 1416 /* CRC is stripped so we need to do some shifting */ 1417 for (i = 0; i < 4; i++) { 1418 cmd->resp[i] <<= 8; 1419 if (i != 3) 1420 cmd->resp[i] |= cmd->resp[i + 1] >> 24; 1421 } 1422 } 1423 1424 static void sdhci_finish_command(struct sdhci_host *host) 1425 { 1426 struct mmc_command *cmd = host->cmd; 1427 1428 host->cmd = NULL; 1429 1430 if (cmd->flags & MMC_RSP_PRESENT) { 1431 if (cmd->flags & MMC_RSP_136) { 1432 sdhci_read_rsp_136(host, cmd); 1433 } else { 1434 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); 1435 } 1436 } 1437 1438 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd) 1439 mmc_command_done(host->mmc, cmd->mrq); 1440 1441 /* 1442 * The host can send and interrupt when the busy state has 1443 * ended, allowing us to wait without wasting CPU cycles. 1444 * The busy signal uses DAT0 so this is similar to waiting 1445 * for data to complete. 1446 * 1447 * Note: The 1.0 specification is a bit ambiguous about this 1448 * feature so there might be some problems with older 1449 * controllers. 1450 */ 1451 if (cmd->flags & MMC_RSP_BUSY) { 1452 if (cmd->data) { 1453 DBG("Cannot wait for busy signal when also doing a data transfer"); 1454 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) && 1455 cmd == host->data_cmd) { 1456 /* Command complete before busy is ended */ 1457 return; 1458 } 1459 } 1460 1461 /* Finished CMD23, now send actual command. */ 1462 if (cmd == cmd->mrq->sbc) { 1463 sdhci_send_command(host, cmd->mrq->cmd); 1464 } else { 1465 1466 /* Processed actual command. 
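		 * If the data phase already completed early (data_early),
		 * finish it now before completing the request.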
*/ 1467 if (host->data && host->data_early) 1468 sdhci_finish_data(host); 1469 1470 if (!cmd->data) 1471 sdhci_finish_mrq(host, cmd->mrq); 1472 } 1473 } 1474 1475 static u16 sdhci_get_preset_value(struct sdhci_host *host) 1476 { 1477 u16 preset = 0; 1478 1479 switch (host->timing) { 1480 case MMC_TIMING_UHS_SDR12: 1481 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1482 break; 1483 case MMC_TIMING_UHS_SDR25: 1484 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); 1485 break; 1486 case MMC_TIMING_UHS_SDR50: 1487 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); 1488 break; 1489 case MMC_TIMING_UHS_SDR104: 1490 case MMC_TIMING_MMC_HS200: 1491 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); 1492 break; 1493 case MMC_TIMING_UHS_DDR50: 1494 case MMC_TIMING_MMC_DDR52: 1495 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); 1496 break; 1497 case MMC_TIMING_MMC_HS400: 1498 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400); 1499 break; 1500 default: 1501 pr_warn("%s: Invalid UHS-I mode selected\n", 1502 mmc_hostname(host->mmc)); 1503 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1504 break; 1505 } 1506 return preset; 1507 } 1508 1509 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock, 1510 unsigned int *actual_clock) 1511 { 1512 int div = 0; /* Initialized for compiler warning */ 1513 int real_div = div, clk_mul = 1; 1514 u16 clk = 0; 1515 bool switch_base_clk = false; 1516 1517 if (host->version >= SDHCI_SPEC_300) { 1518 if (host->preset_enabled) { 1519 u16 pre_val; 1520 1521 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1522 pre_val = sdhci_get_preset_value(host); 1523 div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK) 1524 >> SDHCI_PRESET_SDCLK_FREQ_SHIFT; 1525 if (host->clk_mul && 1526 (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) { 1527 clk = SDHCI_PROG_CLOCK_MODE; 1528 real_div = div + 1; 1529 clk_mul = host->clk_mul; 1530 } else { 1531 real_div = max_t(int, 1, div << 1); 1532 } 1533 goto clock_set; 1534 } 1535 1536 /* 1537 * Check if the Host Controller supports Programmable Clock 1538 * Mode. 1539 */ 1540 if (host->clk_mul) { 1541 for (div = 1; div <= 1024; div++) { 1542 if ((host->max_clk * host->clk_mul / div) 1543 <= clock) 1544 break; 1545 } 1546 if ((host->max_clk * host->clk_mul / div) <= clock) { 1547 /* 1548 * Set Programmable Clock Mode in the Clock 1549 * Control register. 1550 */ 1551 clk = SDHCI_PROG_CLOCK_MODE; 1552 real_div = div; 1553 clk_mul = host->clk_mul; 1554 div--; 1555 } else { 1556 /* 1557 * Divisor can be too small to reach clock 1558 * speed requirement. Then use the base clock. 1559 */ 1560 switch_base_clk = true; 1561 } 1562 } 1563 1564 if (!host->clk_mul || switch_base_clk) { 1565 /* Version 3.00 divisors must be a multiple of 2. */ 1566 if (host->max_clk <= clock) 1567 div = 1; 1568 else { 1569 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; 1570 div += 2) { 1571 if ((host->max_clk / div) <= clock) 1572 break; 1573 } 1574 } 1575 real_div = div; 1576 div >>= 1; 1577 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN) 1578 && !div && host->max_clk <= 25000000) 1579 div = 1; 1580 } 1581 } else { 1582 /* Version 2.00 divisors must be a power of 2. 
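	 * e.g. a 50 MHz base clock and a 400 kHz request give div = 128
	 * (50000000 / 128 ~= 390 kHz), and div >> 1 = 64 goes in the register.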
*/ 1583 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) { 1584 if ((host->max_clk / div) <= clock) 1585 break; 1586 } 1587 real_div = div; 1588 div >>= 1; 1589 } 1590 1591 clock_set: 1592 if (real_div) 1593 *actual_clock = (host->max_clk * clk_mul) / real_div; 1594 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; 1595 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) 1596 << SDHCI_DIVIDER_HI_SHIFT; 1597 1598 return clk; 1599 } 1600 EXPORT_SYMBOL_GPL(sdhci_calc_clk); 1601 1602 void sdhci_enable_clk(struct sdhci_host *host, u16 clk) 1603 { 1604 ktime_t timeout; 1605 1606 clk |= SDHCI_CLOCK_INT_EN; 1607 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1608 1609 /* Wait max 20 ms */ 1610 timeout = ktime_add_ms(ktime_get(), 20); 1611 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) 1612 & SDHCI_CLOCK_INT_STABLE)) { 1613 if (ktime_after(ktime_get(), timeout)) { 1614 pr_err("%s: Internal clock never stabilised.\n", 1615 mmc_hostname(host->mmc)); 1616 sdhci_dumpregs(host); 1617 return; 1618 } 1619 udelay(10); 1620 } 1621 1622 clk |= SDHCI_CLOCK_CARD_EN; 1623 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1624 } 1625 EXPORT_SYMBOL_GPL(sdhci_enable_clk); 1626 1627 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 1628 { 1629 u16 clk; 1630 1631 host->mmc->actual_clock = 0; 1632 1633 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 1634 1635 if (clock == 0) 1636 return; 1637 1638 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); 1639 sdhci_enable_clk(host, clk); 1640 } 1641 EXPORT_SYMBOL_GPL(sdhci_set_clock); 1642 1643 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode, 1644 unsigned short vdd) 1645 { 1646 struct mmc_host *mmc = host->mmc; 1647 1648 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 1649 1650 if (mode != MMC_POWER_OFF) 1651 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); 1652 else 1653 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1654 } 1655 1656 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, 1657 unsigned short vdd) 1658 { 1659 u8 pwr = 0; 1660 1661 if (mode != MMC_POWER_OFF) { 1662 switch (1 << vdd) { 1663 case MMC_VDD_165_195: 1664 /* 1665 * Without a regulator, SDHCI does not support 2.0v 1666 * so we only get here if the driver deliberately 1667 * added the 2.0v range to ocr_avail. Map it to 1.8v 1668 * for the purpose of turning on the power. 1669 */ 1670 case MMC_VDD_20_21: 1671 pwr = SDHCI_POWER_180; 1672 break; 1673 case MMC_VDD_29_30: 1674 case MMC_VDD_30_31: 1675 pwr = SDHCI_POWER_300; 1676 break; 1677 case MMC_VDD_32_33: 1678 case MMC_VDD_33_34: 1679 pwr = SDHCI_POWER_330; 1680 break; 1681 default: 1682 WARN(1, "%s: Invalid vdd %#x\n", 1683 mmc_hostname(host->mmc), vdd); 1684 break; 1685 } 1686 } 1687 1688 if (host->pwr == pwr) 1689 return; 1690 1691 host->pwr = pwr; 1692 1693 if (pwr == 0) { 1694 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1695 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 1696 sdhci_runtime_pm_bus_off(host); 1697 } else { 1698 /* 1699 * Spec says that we should clear the power reg before setting 1700 * a new value. Some controllers don't seem to like this though. 1701 */ 1702 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 1703 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1704 1705 /* 1706 * At least the Marvell CaFe chip gets confused if we set the 1707 * voltage and set turn on power at the same time, so set the 1708 * voltage first. 
1709 */ 1710 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) 1711 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1712 1713 pwr |= SDHCI_POWER_ON; 1714 1715 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1716 1717 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 1718 sdhci_runtime_pm_bus_on(host); 1719 1720 /* 1721 * Some controllers need an extra 10ms delay of 10ms before 1722 * they can apply clock after applying power 1723 */ 1724 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 1725 mdelay(10); 1726 } 1727 } 1728 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg); 1729 1730 void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 1731 unsigned short vdd) 1732 { 1733 if (IS_ERR(host->mmc->supply.vmmc)) 1734 sdhci_set_power_noreg(host, mode, vdd); 1735 else 1736 sdhci_set_power_reg(host, mode, vdd); 1737 } 1738 EXPORT_SYMBOL_GPL(sdhci_set_power); 1739 1740 /*****************************************************************************\ 1741 * * 1742 * MMC callbacks * 1743 * * 1744 \*****************************************************************************/ 1745 1746 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1747 { 1748 struct sdhci_host *host; 1749 int present; 1750 unsigned long flags; 1751 1752 host = mmc_priv(mmc); 1753 1754 /* Firstly check card presence */ 1755 present = mmc->ops->get_cd(mmc); 1756 1757 spin_lock_irqsave(&host->lock, flags); 1758 1759 sdhci_led_activate(host); 1760 1761 /* 1762 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED 1763 * requests if Auto-CMD12 is enabled. 1764 */ 1765 if (sdhci_auto_cmd12(host, mrq)) { 1766 if (mrq->stop) { 1767 mrq->data->stop = NULL; 1768 mrq->stop = NULL; 1769 } 1770 } 1771 1772 if (!present || host->flags & SDHCI_DEVICE_DEAD) { 1773 mrq->cmd->error = -ENOMEDIUM; 1774 sdhci_finish_mrq(host, mrq); 1775 } else { 1776 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) 1777 sdhci_send_command(host, mrq->sbc); 1778 else 1779 sdhci_send_command(host, mrq->cmd); 1780 } 1781 1782 mmiowb(); 1783 spin_unlock_irqrestore(&host->lock, flags); 1784 } 1785 EXPORT_SYMBOL_GPL(sdhci_request); 1786 1787 void sdhci_set_bus_width(struct sdhci_host *host, int width) 1788 { 1789 u8 ctrl; 1790 1791 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 1792 if (width == MMC_BUS_WIDTH_8) { 1793 ctrl &= ~SDHCI_CTRL_4BITBUS; 1794 ctrl |= SDHCI_CTRL_8BITBUS; 1795 } else { 1796 if (host->mmc->caps & MMC_CAP_8_BIT_DATA) 1797 ctrl &= ~SDHCI_CTRL_8BITBUS; 1798 if (width == MMC_BUS_WIDTH_4) 1799 ctrl |= SDHCI_CTRL_4BITBUS; 1800 else 1801 ctrl &= ~SDHCI_CTRL_4BITBUS; 1802 } 1803 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1804 } 1805 EXPORT_SYMBOL_GPL(sdhci_set_bus_width); 1806 1807 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 1808 { 1809 u16 ctrl_2; 1810 1811 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1812 /* Select Bus Speed Mode for host */ 1813 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 1814 if ((timing == MMC_TIMING_MMC_HS200) || 1815 (timing == MMC_TIMING_UHS_SDR104)) 1816 ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 1817 else if (timing == MMC_TIMING_UHS_SDR12) 1818 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 1819 else if (timing == MMC_TIMING_UHS_SDR25) 1820 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 1821 else if (timing == MMC_TIMING_UHS_SDR50) 1822 ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 1823 else if ((timing == MMC_TIMING_UHS_DDR50) || 1824 (timing == MMC_TIMING_MMC_DDR52)) 1825 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 1826 else if (timing == MMC_TIMING_MMC_HS400) 1827 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ 1828 sdhci_writew(host, ctrl_2, 
SDHCI_HOST_CONTROL2); 1829 } 1830 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 1831 1832 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1833 { 1834 struct sdhci_host *host = mmc_priv(mmc); 1835 u8 ctrl; 1836 1837 if (ios->power_mode == MMC_POWER_UNDEFINED) 1838 return; 1839 1840 if (host->flags & SDHCI_DEVICE_DEAD) { 1841 if (!IS_ERR(mmc->supply.vmmc) && 1842 ios->power_mode == MMC_POWER_OFF) 1843 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 1844 return; 1845 } 1846 1847 /* 1848 * Reset the chip on each power off. 1849 * Should clear out any weird states. 1850 */ 1851 if (ios->power_mode == MMC_POWER_OFF) { 1852 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 1853 sdhci_reinit(host); 1854 } 1855 1856 if (host->version >= SDHCI_SPEC_300 && 1857 (ios->power_mode == MMC_POWER_UP) && 1858 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) 1859 sdhci_enable_preset_value(host, false); 1860 1861 if (!ios->clock || ios->clock != host->clock) { 1862 host->ops->set_clock(host, ios->clock); 1863 host->clock = ios->clock; 1864 1865 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && 1866 host->clock) { 1867 host->timeout_clk = host->mmc->actual_clock ? 1868 host->mmc->actual_clock / 1000 : 1869 host->clock / 1000; 1870 host->mmc->max_busy_timeout = 1871 host->ops->get_max_timeout_count ? 1872 host->ops->get_max_timeout_count(host) : 1873 1 << 27; 1874 host->mmc->max_busy_timeout /= host->timeout_clk; 1875 } 1876 } 1877 1878 if (host->ops->set_power) 1879 host->ops->set_power(host, ios->power_mode, ios->vdd); 1880 else 1881 sdhci_set_power(host, ios->power_mode, ios->vdd); 1882 1883 if (host->ops->platform_send_init_74_clocks) 1884 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 1885 1886 host->ops->set_bus_width(host, ios->bus_width); 1887 1888 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 1889 1890 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) { 1891 if (ios->timing == MMC_TIMING_SD_HS || 1892 ios->timing == MMC_TIMING_MMC_HS || 1893 ios->timing == MMC_TIMING_MMC_HS400 || 1894 ios->timing == MMC_TIMING_MMC_HS200 || 1895 ios->timing == MMC_TIMING_MMC_DDR52 || 1896 ios->timing == MMC_TIMING_UHS_SDR50 || 1897 ios->timing == MMC_TIMING_UHS_SDR104 || 1898 ios->timing == MMC_TIMING_UHS_DDR50 || 1899 ios->timing == MMC_TIMING_UHS_SDR25) 1900 ctrl |= SDHCI_CTRL_HISPD; 1901 else 1902 ctrl &= ~SDHCI_CTRL_HISPD; 1903 } 1904 1905 if (host->version >= SDHCI_SPEC_300) { 1906 u16 clk, ctrl_2; 1907 1908 if (!host->preset_enabled) { 1909 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1910 /* 1911 * We only need to set Driver Strength if the 1912 * preset value enable is not set. 
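			 * (When presets are enabled the driver strength comes
			 * from the preset value register instead; see
			 * sdhci_get_preset_value() and SDHCI_PRESET_DRV_MASK.)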
		 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
		    ((ios->timing == MMC_TIMING_UHS_SDR12) ||
		     (ios->timing == MMC_TIMING_UHS_SDR25) ||
		     (ios->timing == MMC_TIMING_UHS_SDR50) ||
		     (ios->timing == MMC_TIMING_UHS_SDR104) ||
		     (ios->timing == MMC_TIMING_UHS_DDR50) ||
		     (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot gpio detect, if defined it takes precedence
	 * over the built-in controller functionality
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present.
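	 * (sdhci_setup_host() sets MMC_CAP_NEEDS_POLL for this quirk when no
	 * CD gpio is available, so the core re-checks the slot periodically.)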
*/ 2010 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2011 return 1; 2012 2013 /* Host native card detect */ 2014 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2015 } 2016 2017 static int sdhci_check_ro(struct sdhci_host *host) 2018 { 2019 unsigned long flags; 2020 int is_readonly; 2021 2022 spin_lock_irqsave(&host->lock, flags); 2023 2024 if (host->flags & SDHCI_DEVICE_DEAD) 2025 is_readonly = 0; 2026 else if (host->ops->get_ro) 2027 is_readonly = host->ops->get_ro(host); 2028 else 2029 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) 2030 & SDHCI_WRITE_PROTECT); 2031 2032 spin_unlock_irqrestore(&host->lock, flags); 2033 2034 /* This quirk needs to be replaced by a callback-function later */ 2035 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ? 2036 !is_readonly : is_readonly; 2037 } 2038 2039 #define SAMPLE_COUNT 5 2040 2041 static int sdhci_get_ro(struct mmc_host *mmc) 2042 { 2043 struct sdhci_host *host = mmc_priv(mmc); 2044 int i, ro_count; 2045 2046 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) 2047 return sdhci_check_ro(host); 2048 2049 ro_count = 0; 2050 for (i = 0; i < SAMPLE_COUNT; i++) { 2051 if (sdhci_check_ro(host)) { 2052 if (++ro_count > SAMPLE_COUNT / 2) 2053 return 1; 2054 } 2055 msleep(30); 2056 } 2057 return 0; 2058 } 2059 2060 static void sdhci_hw_reset(struct mmc_host *mmc) 2061 { 2062 struct sdhci_host *host = mmc_priv(mmc); 2063 2064 if (host->ops && host->ops->hw_reset) 2065 host->ops->hw_reset(host); 2066 } 2067 2068 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 2069 { 2070 if (!(host->flags & SDHCI_DEVICE_DEAD)) { 2071 if (enable) 2072 host->ier |= SDHCI_INT_CARD_INT; 2073 else 2074 host->ier &= ~SDHCI_INT_CARD_INT; 2075 2076 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2077 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2078 mmiowb(); 2079 } 2080 } 2081 2082 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 2083 { 2084 struct sdhci_host *host = mmc_priv(mmc); 2085 unsigned long flags; 2086 2087 if (enable) 2088 pm_runtime_get_noresume(host->mmc->parent); 2089 2090 spin_lock_irqsave(&host->lock, flags); 2091 if (enable) 2092 host->flags |= SDHCI_SDIO_IRQ_ENABLED; 2093 else 2094 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED; 2095 2096 sdhci_enable_sdio_irq_nolock(host, enable); 2097 spin_unlock_irqrestore(&host->lock, flags); 2098 2099 if (!enable) 2100 pm_runtime_put_noidle(host->mmc->parent); 2101 } 2102 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2103 2104 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 2105 struct mmc_ios *ios) 2106 { 2107 struct sdhci_host *host = mmc_priv(mmc); 2108 u16 ctrl; 2109 int ret; 2110 2111 /* 2112 * Signal Voltage Switching is only applicable for Host Controllers 2113 * v3.00 and above. 
2114 */ 2115 if (host->version < SDHCI_SPEC_300) 2116 return 0; 2117 2118 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2119 2120 switch (ios->signal_voltage) { 2121 case MMC_SIGNAL_VOLTAGE_330: 2122 if (!(host->flags & SDHCI_SIGNALING_330)) 2123 return -EINVAL; 2124 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 2125 ctrl &= ~SDHCI_CTRL_VDD_180; 2126 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2127 2128 if (!IS_ERR(mmc->supply.vqmmc)) { 2129 ret = mmc_regulator_set_vqmmc(mmc, ios); 2130 if (ret) { 2131 pr_warn("%s: Switching to 3.3V signalling voltage failed\n", 2132 mmc_hostname(mmc)); 2133 return -EIO; 2134 } 2135 } 2136 /* Wait for 5ms */ 2137 usleep_range(5000, 5500); 2138 2139 /* 3.3V regulator output should be stable within 5 ms */ 2140 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2141 if (!(ctrl & SDHCI_CTRL_VDD_180)) 2142 return 0; 2143 2144 pr_warn("%s: 3.3V regulator output did not become stable\n", 2145 mmc_hostname(mmc)); 2146 2147 return -EAGAIN; 2148 case MMC_SIGNAL_VOLTAGE_180: 2149 if (!(host->flags & SDHCI_SIGNALING_180)) 2150 return -EINVAL; 2151 if (!IS_ERR(mmc->supply.vqmmc)) { 2152 ret = mmc_regulator_set_vqmmc(mmc, ios); 2153 if (ret) { 2154 pr_warn("%s: Switching to 1.8V signalling voltage failed\n", 2155 mmc_hostname(mmc)); 2156 return -EIO; 2157 } 2158 } 2159 2160 /* 2161 * Enable 1.8V Signal Enable in the Host Control2 2162 * register 2163 */ 2164 ctrl |= SDHCI_CTRL_VDD_180; 2165 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2166 2167 /* Some controllers need to do more when switching */ 2168 if (host->ops->voltage_switch) 2169 host->ops->voltage_switch(host); 2170 2171 /* 1.8V regulator output should be stable within 5 ms */ 2172 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2173 if (ctrl & SDHCI_CTRL_VDD_180) 2174 return 0; 2175 2176 pr_warn("%s: 1.8V regulator output did not become stable\n", 2177 mmc_hostname(mmc)); 2178 2179 return -EAGAIN; 2180 case MMC_SIGNAL_VOLTAGE_120: 2181 if (!(host->flags & SDHCI_SIGNALING_120)) 2182 return -EINVAL; 2183 if (!IS_ERR(mmc->supply.vqmmc)) { 2184 ret = mmc_regulator_set_vqmmc(mmc, ios); 2185 if (ret) { 2186 pr_warn("%s: Switching to 1.2V signalling voltage failed\n", 2187 mmc_hostname(mmc)); 2188 return -EIO; 2189 } 2190 } 2191 return 0; 2192 default: 2193 /* No signal voltage switch required */ 2194 return 0; 2195 } 2196 } 2197 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch); 2198 2199 static int sdhci_card_busy(struct mmc_host *mmc) 2200 { 2201 struct sdhci_host *host = mmc_priv(mmc); 2202 u32 present_state; 2203 2204 /* Check whether DAT[0] is 0 */ 2205 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 2206 2207 return !(present_state & SDHCI_DATA_0_LVL_MASK); 2208 } 2209 2210 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 2211 { 2212 struct sdhci_host *host = mmc_priv(mmc); 2213 unsigned long flags; 2214 2215 spin_lock_irqsave(&host->lock, flags); 2216 host->flags |= SDHCI_HS400_TUNING; 2217 spin_unlock_irqrestore(&host->lock, flags); 2218 2219 return 0; 2220 } 2221 2222 void sdhci_start_tuning(struct sdhci_host *host) 2223 { 2224 u16 ctrl; 2225 2226 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2227 ctrl |= SDHCI_CTRL_EXEC_TUNING; 2228 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND) 2229 ctrl |= SDHCI_CTRL_TUNED_CLK; 2230 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2231 2232 /* 2233 * As per the Host Controller spec v3.00, tuning command 2234 * generates Buffer Read Ready interrupt, so enable that.
2235 * 2236 * Note: The spec clearly says that when tuning sequence 2237 * is being performed, the controller does not generate 2238 * interrupts other than Buffer Read Ready interrupt. But 2239 * to make sure we don't hit a controller bug, we _only_ 2240 * enable Buffer Read Ready interrupt here. 2241 */ 2242 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 2243 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 2244 } 2245 EXPORT_SYMBOL_GPL(sdhci_start_tuning); 2246 2247 void sdhci_end_tuning(struct sdhci_host *host) 2248 { 2249 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2250 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2251 } 2252 EXPORT_SYMBOL_GPL(sdhci_end_tuning); 2253 2254 void sdhci_reset_tuning(struct sdhci_host *host) 2255 { 2256 u16 ctrl; 2257 2258 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2259 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2260 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2261 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2262 } 2263 EXPORT_SYMBOL_GPL(sdhci_reset_tuning); 2264 2265 static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) 2266 { 2267 sdhci_reset_tuning(host); 2268 2269 sdhci_do_reset(host, SDHCI_RESET_CMD); 2270 sdhci_do_reset(host, SDHCI_RESET_DATA); 2271 2272 sdhci_end_tuning(host); 2273 2274 mmc_abort_tuning(host->mmc, opcode); 2275 } 2276 2277 /* 2278 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI 2279 * tuning command does not have a data payload (or rather the hardware does it 2280 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command 2281 * interrupt setup is different to other commands and there is no timeout 2282 * interrupt so special handling is needed. 2283 */ 2284 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) 2285 { 2286 struct mmc_host *mmc = host->mmc; 2287 struct mmc_command cmd = {}; 2288 struct mmc_request mrq = {}; 2289 unsigned long flags; 2290 u32 b = host->sdma_boundary; 2291 2292 spin_lock_irqsave(&host->lock, flags); 2293 2294 cmd.opcode = opcode; 2295 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2296 cmd.mrq = &mrq; 2297 2298 mrq.cmd = &cmd; 2299 /* 2300 * In response to CMD19, the card sends 64 bytes of tuning 2301 * block to the Host Controller. So we set the block size 2302 * to 64 here. 2303 */ 2304 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && 2305 mmc->ios.bus_width == MMC_BUS_WIDTH_8) 2306 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); 2307 else 2308 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); 2309 2310 /* 2311 * The tuning block is sent by the card to the host controller. 2312 * So we set the TRNS_READ bit in the Transfer Mode register. 2313 * This also takes care of setting DMA Enable and Multi Block 2314 * Select in the same register to 0. 2315 */ 2316 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 2317 2318 sdhci_send_command(host, &cmd); 2319 2320 host->cmd = NULL; 2321 2322 sdhci_del_timer(host, &mrq); 2323 2324 host->tuning_done = 0; 2325 2326 mmiowb(); 2327 spin_unlock_irqrestore(&host->lock, flags); 2328 2329 /* Wait for Buffer Read Ready interrupt */ 2330 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2331 msecs_to_jiffies(50)); 2332 2333 } 2334 EXPORT_SYMBOL_GPL(sdhci_send_tuning); 2335 2336 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2337 { 2338 int i; 2339 2340 /* 2341 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number 2342 * of loops reaches 40 times. 
2343 */ 2344 for (i = 0; i < MAX_TUNING_LOOP; i++) { 2345 u16 ctrl; 2346 2347 sdhci_send_tuning(host, opcode); 2348 2349 if (!host->tuning_done) { 2350 pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n", 2351 mmc_hostname(host->mmc)); 2352 sdhci_abort_tuning(host, opcode); 2353 return -ETIMEDOUT; 2354 } 2355 2356 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2357 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { 2358 if (ctrl & SDHCI_CTRL_TUNED_CLK) 2359 return 0; /* Success! */ 2360 break; 2361 } 2362 2363 /* Spec does not require a delay between tuning cycles */ 2364 if (host->tuning_delay > 0) 2365 mdelay(host->tuning_delay); 2366 } 2367 2368 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2369 mmc_hostname(host->mmc)); 2370 sdhci_reset_tuning(host); 2371 return -EAGAIN; 2372 } 2373 2374 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2375 { 2376 struct sdhci_host *host = mmc_priv(mmc); 2377 int err = 0; 2378 unsigned int tuning_count = 0; 2379 bool hs400_tuning; 2380 2381 hs400_tuning = host->flags & SDHCI_HS400_TUNING; 2382 2383 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 2384 tuning_count = host->tuning_count; 2385 2386 /* 2387 * The Host Controller needs tuning in case of SDR104 and DDR50 2388 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in 2389 * the Capabilities register. 2390 * If the Host Controller supports the HS200 mode then the 2391 * tuning function has to be executed. 2392 */ 2393 switch (host->timing) { 2394 /* HS400 tuning is done in HS200 mode */ 2395 case MMC_TIMING_MMC_HS400: 2396 err = -EINVAL; 2397 goto out; 2398 2399 case MMC_TIMING_MMC_HS200: 2400 /* 2401 * Periodic re-tuning for HS400 is not expected to be needed, so 2402 * disable it here. 2403 */ 2404 if (hs400_tuning) 2405 tuning_count = 0; 2406 break; 2407 2408 case MMC_TIMING_UHS_SDR104: 2409 case MMC_TIMING_UHS_DDR50: 2410 break; 2411 2412 case MMC_TIMING_UHS_SDR50: 2413 if (host->flags & SDHCI_SDR50_NEEDS_TUNING) 2414 break; 2415 /* FALLTHROUGH */ 2416 2417 default: 2418 goto out; 2419 } 2420 2421 if (host->ops->platform_execute_tuning) { 2422 err = host->ops->platform_execute_tuning(host, opcode); 2423 goto out; 2424 } 2425 2426 host->mmc->retune_period = tuning_count; 2427 2428 if (host->tuning_delay < 0) 2429 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; 2430 2431 sdhci_start_tuning(host); 2432 2433 host->tuning_err = __sdhci_execute_tuning(host, opcode); 2434 2435 sdhci_end_tuning(host); 2436 out: 2437 host->flags &= ~SDHCI_HS400_TUNING; 2438 2439 return err; 2440 } 2441 EXPORT_SYMBOL_GPL(sdhci_execute_tuning); 2442 2443 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2444 { 2445 /* Host Controller v3.00 defines preset value registers */ 2446 if (host->version < SDHCI_SPEC_300) 2447 return; 2448 2449 /* 2450 * We only enable or disable Preset Value if they are not already 2451 * enabled or disabled respectively. Otherwise, we bail out. 
2452 */ 2453 if (host->preset_enabled != enable) { 2454 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2455 2456 if (enable) 2457 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2458 else 2459 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2460 2461 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2462 2463 if (enable) 2464 host->flags |= SDHCI_PV_ENABLED; 2465 else 2466 host->flags &= ~SDHCI_PV_ENABLED; 2467 2468 host->preset_enabled = enable; 2469 } 2470 } 2471 2472 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 2473 int err) 2474 { 2475 struct sdhci_host *host = mmc_priv(mmc); 2476 struct mmc_data *data = mrq->data; 2477 2478 if (data->host_cookie != COOKIE_UNMAPPED) 2479 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 2480 mmc_get_dma_dir(data)); 2481 2482 data->host_cookie = COOKIE_UNMAPPED; 2483 } 2484 2485 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 2486 { 2487 struct sdhci_host *host = mmc_priv(mmc); 2488 2489 mrq->data->host_cookie = COOKIE_UNMAPPED; 2490 2491 /* 2492 * No pre-mapping in the pre hook if we're using the bounce buffer, 2493 * for that we would need two bounce buffers since one buffer is 2494 * in flight when this is getting called. 2495 */ 2496 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) 2497 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 2498 } 2499 2500 static inline bool sdhci_has_requests(struct sdhci_host *host) 2501 { 2502 return host->cmd || host->data_cmd; 2503 } 2504 2505 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) 2506 { 2507 if (host->data_cmd) { 2508 host->data_cmd->error = err; 2509 sdhci_finish_mrq(host, host->data_cmd->mrq); 2510 } 2511 2512 if (host->cmd) { 2513 host->cmd->error = err; 2514 sdhci_finish_mrq(host, host->cmd->mrq); 2515 } 2516 } 2517 2518 static void sdhci_card_event(struct mmc_host *mmc) 2519 { 2520 struct sdhci_host *host = mmc_priv(mmc); 2521 unsigned long flags; 2522 int present; 2523 2524 /* First check if client has provided their own card event */ 2525 if (host->ops->card_event) 2526 host->ops->card_event(host); 2527 2528 present = mmc->ops->get_cd(mmc); 2529 2530 spin_lock_irqsave(&host->lock, flags); 2531 2532 /* Check sdhci_has_requests() first in case we are runtime suspended */ 2533 if (sdhci_has_requests(host) && !present) { 2534 pr_err("%s: Card removed during transfer!\n", 2535 mmc_hostname(host->mmc)); 2536 pr_err("%s: Resetting controller.\n", 2537 mmc_hostname(host->mmc)); 2538 2539 sdhci_do_reset(host, SDHCI_RESET_CMD); 2540 sdhci_do_reset(host, SDHCI_RESET_DATA); 2541 2542 sdhci_error_out_mrqs(host, -ENOMEDIUM); 2543 } 2544 2545 spin_unlock_irqrestore(&host->lock, flags); 2546 } 2547 2548 static const struct mmc_host_ops sdhci_ops = { 2549 .request = sdhci_request, 2550 .post_req = sdhci_post_req, 2551 .pre_req = sdhci_pre_req, 2552 .set_ios = sdhci_set_ios, 2553 .get_cd = sdhci_get_cd, 2554 .get_ro = sdhci_get_ro, 2555 .hw_reset = sdhci_hw_reset, 2556 .enable_sdio_irq = sdhci_enable_sdio_irq, 2557 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 2558 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 2559 .execute_tuning = sdhci_execute_tuning, 2560 .card_event = sdhci_card_event, 2561 .card_busy = sdhci_card_busy, 2562 }; 2563 2564 /*****************************************************************************\ 2565 * * 2566 * Tasklets * 2567 * * 2568 \*****************************************************************************/ 2569 2570 static bool sdhci_request_done(struct sdhci_host *host) 
2571 { 2572 unsigned long flags; 2573 struct mmc_request *mrq; 2574 int i; 2575 2576 spin_lock_irqsave(&host->lock, flags); 2577 2578 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 2579 mrq = host->mrqs_done[i]; 2580 if (mrq) 2581 break; 2582 } 2583 2584 if (!mrq) { 2585 spin_unlock_irqrestore(&host->lock, flags); 2586 return true; 2587 } 2588 2589 sdhci_del_timer(host, mrq); 2590 2591 /* 2592 * Always unmap the data buffers if they were mapped by 2593 * sdhci_prepare_data() whenever we finish with a request. 2594 * This avoids leaking DMA mappings on error. 2595 */ 2596 if (host->flags & SDHCI_REQ_USE_DMA) { 2597 struct mmc_data *data = mrq->data; 2598 2599 if (data && data->host_cookie == COOKIE_MAPPED) { 2600 if (host->bounce_buffer) { 2601 /* 2602 * On reads, copy the bounced data into the 2603 * sglist 2604 */ 2605 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 2606 unsigned int length = data->bytes_xfered; 2607 2608 if (length > host->bounce_buffer_size) { 2609 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 2610 mmc_hostname(host->mmc), 2611 host->bounce_buffer_size, 2612 data->bytes_xfered); 2613 /* Cap it down and continue */ 2614 length = host->bounce_buffer_size; 2615 } 2616 dma_sync_single_for_cpu( 2617 host->mmc->parent, 2618 host->bounce_addr, 2619 host->bounce_buffer_size, 2620 DMA_FROM_DEVICE); 2621 sg_copy_from_buffer(data->sg, 2622 data->sg_len, 2623 host->bounce_buffer, 2624 length); 2625 } else { 2626 /* No copying, just switch ownership */ 2627 dma_sync_single_for_cpu( 2628 host->mmc->parent, 2629 host->bounce_addr, 2630 host->bounce_buffer_size, 2631 mmc_get_dma_dir(data)); 2632 } 2633 } else { 2634 /* Unmap the raw data */ 2635 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 2636 data->sg_len, 2637 mmc_get_dma_dir(data)); 2638 } 2639 data->host_cookie = COOKIE_UNMAPPED; 2640 } 2641 } 2642 2643 /* 2644 * The controller needs a reset of internal state machines 2645 * upon error conditions. 2646 */ 2647 if (sdhci_needs_reset(host, mrq)) { 2648 /* 2649 * Do not finish until command and data lines are available for 2650 * reset. Note there can only be one other mrq, so it cannot 2651 * also be in mrqs_done, otherwise host->cmd and host->data_cmd 2652 * would both be null. 2653 */ 2654 if (host->cmd || host->data_cmd) { 2655 spin_unlock_irqrestore(&host->lock, flags); 2656 return true; 2657 } 2658 2659 /* Some controllers need this kick or reset won't work here */ 2660 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 2661 /* This is to force an update */ 2662 host->ops->set_clock(host, host->clock); 2663 2664 /* Spec says we should do both at the same time, but Ricoh 2665 controllers do not like that. 
*/ 2666 sdhci_do_reset(host, SDHCI_RESET_CMD); 2667 sdhci_do_reset(host, SDHCI_RESET_DATA); 2668 2669 host->pending_reset = false; 2670 } 2671 2672 if (!sdhci_has_requests(host)) 2673 sdhci_led_deactivate(host); 2674 2675 host->mrqs_done[i] = NULL; 2676 2677 mmiowb(); 2678 spin_unlock_irqrestore(&host->lock, flags); 2679 2680 mmc_request_done(host->mmc, mrq); 2681 2682 return false; 2683 } 2684 2685 static void sdhci_tasklet_finish(unsigned long param) 2686 { 2687 struct sdhci_host *host = (struct sdhci_host *)param; 2688 2689 while (!sdhci_request_done(host)) 2690 ; 2691 } 2692 2693 static void sdhci_timeout_timer(struct timer_list *t) 2694 { 2695 struct sdhci_host *host; 2696 unsigned long flags; 2697 2698 host = from_timer(host, t, timer); 2699 2700 spin_lock_irqsave(&host->lock, flags); 2701 2702 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { 2703 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", 2704 mmc_hostname(host->mmc)); 2705 sdhci_dumpregs(host); 2706 2707 host->cmd->error = -ETIMEDOUT; 2708 sdhci_finish_mrq(host, host->cmd->mrq); 2709 } 2710 2711 mmiowb(); 2712 spin_unlock_irqrestore(&host->lock, flags); 2713 } 2714 2715 static void sdhci_timeout_data_timer(struct timer_list *t) 2716 { 2717 struct sdhci_host *host; 2718 unsigned long flags; 2719 2720 host = from_timer(host, t, data_timer); 2721 2722 spin_lock_irqsave(&host->lock, flags); 2723 2724 if (host->data || host->data_cmd || 2725 (host->cmd && sdhci_data_line_cmd(host->cmd))) { 2726 pr_err("%s: Timeout waiting for hardware interrupt.\n", 2727 mmc_hostname(host->mmc)); 2728 sdhci_dumpregs(host); 2729 2730 if (host->data) { 2731 host->data->error = -ETIMEDOUT; 2732 sdhci_finish_data(host); 2733 } else if (host->data_cmd) { 2734 host->data_cmd->error = -ETIMEDOUT; 2735 sdhci_finish_mrq(host, host->data_cmd->mrq); 2736 } else { 2737 host->cmd->error = -ETIMEDOUT; 2738 sdhci_finish_mrq(host, host->cmd->mrq); 2739 } 2740 } 2741 2742 mmiowb(); 2743 spin_unlock_irqrestore(&host->lock, flags); 2744 } 2745 2746 /*****************************************************************************\ 2747 * * 2748 * Interrupt handling * 2749 * * 2750 \*****************************************************************************/ 2751 2752 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask) 2753 { 2754 if (!host->cmd) { 2755 /* 2756 * SDHCI recovers from errors by resetting the cmd and data 2757 * circuits. Until that is done, there very well might be more 2758 * interrupts, so ignore them in that case. 2759 */ 2760 if (host->pending_reset) 2761 return; 2762 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 2763 mmc_hostname(host->mmc), (unsigned)intmask); 2764 sdhci_dumpregs(host); 2765 return; 2766 } 2767 2768 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 2769 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 2770 if (intmask & SDHCI_INT_TIMEOUT) 2771 host->cmd->error = -ETIMEDOUT; 2772 else 2773 host->cmd->error = -EILSEQ; 2774 2775 /* 2776 * If this command initiates a data phase and a response 2777 * CRC error is signalled, the card can start transferring 2778 * data - the card may have received the command without 2779 * error. We must not terminate the mmc_request early. 2780 * 2781 * If the card did not receive the command or returned an 2782 * error which prevented it sending data, the data phase 2783 * will time out. 
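* In that case the code below simply forgets the command (host->cmd = NULL) and leaves the data phase to either complete or time out on its own.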
2784 */ 2785 if (host->cmd->data && 2786 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 2787 SDHCI_INT_CRC) { 2788 host->cmd = NULL; 2789 return; 2790 } 2791 2792 sdhci_finish_mrq(host, host->cmd->mrq); 2793 return; 2794 } 2795 2796 if (intmask & SDHCI_INT_RESPONSE) 2797 sdhci_finish_command(host); 2798 } 2799 2800 static void sdhci_adma_show_error(struct sdhci_host *host) 2801 { 2802 void *desc = host->adma_table; 2803 2804 sdhci_dumpregs(host); 2805 2806 while (true) { 2807 struct sdhci_adma2_64_desc *dma_desc = desc; 2808 2809 if (host->flags & SDHCI_USE_64_BIT_DMA) 2810 DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 2811 desc, le32_to_cpu(dma_desc->addr_hi), 2812 le32_to_cpu(dma_desc->addr_lo), 2813 le16_to_cpu(dma_desc->len), 2814 le16_to_cpu(dma_desc->cmd)); 2815 else 2816 DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 2817 desc, le32_to_cpu(dma_desc->addr_lo), 2818 le16_to_cpu(dma_desc->len), 2819 le16_to_cpu(dma_desc->cmd)); 2820 2821 desc += host->desc_sz; 2822 2823 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 2824 break; 2825 } 2826 } 2827 2828 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 2829 { 2830 u32 command; 2831 2832 /* CMD19 generates _only_ Buffer Read Ready interrupt */ 2833 if (intmask & SDHCI_INT_DATA_AVAIL) { 2834 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); 2835 if (command == MMC_SEND_TUNING_BLOCK || 2836 command == MMC_SEND_TUNING_BLOCK_HS200) { 2837 host->tuning_done = 1; 2838 wake_up(&host->buf_ready_int); 2839 return; 2840 } 2841 } 2842 2843 if (!host->data) { 2844 struct mmc_command *data_cmd = host->data_cmd; 2845 2846 /* 2847 * The "data complete" interrupt is also used to 2848 * indicate that a busy state has ended. See comment 2849 * above in sdhci_cmd_irq(). 2850 */ 2851 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 2852 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 2853 host->data_cmd = NULL; 2854 data_cmd->error = -ETIMEDOUT; 2855 sdhci_finish_mrq(host, data_cmd->mrq); 2856 return; 2857 } 2858 if (intmask & SDHCI_INT_DATA_END) { 2859 host->data_cmd = NULL; 2860 /* 2861 * Some cards handle busy-end interrupt 2862 * before the command completed, so make 2863 * sure we do things in the proper order. 2864 */ 2865 if (host->cmd == data_cmd) 2866 return; 2867 2868 sdhci_finish_mrq(host, data_cmd->mrq); 2869 return; 2870 } 2871 } 2872 2873 /* 2874 * SDHCI recovers from errors by resetting the cmd and data 2875 * circuits. Until that is done, there very well might be more 2876 * interrupts, so ignore them in that case. 
2877 */ 2878 if (host->pending_reset) 2879 return; 2880 2881 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 2882 mmc_hostname(host->mmc), (unsigned)intmask); 2883 sdhci_dumpregs(host); 2884 2885 return; 2886 } 2887 2888 if (intmask & SDHCI_INT_DATA_TIMEOUT) 2889 host->data->error = -ETIMEDOUT; 2890 else if (intmask & SDHCI_INT_DATA_END_BIT) 2891 host->data->error = -EILSEQ; 2892 else if ((intmask & SDHCI_INT_DATA_CRC) && 2893 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 2894 != MMC_BUS_TEST_R) 2895 host->data->error = -EILSEQ; 2896 else if (intmask & SDHCI_INT_ADMA_ERROR) { 2897 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); 2898 sdhci_adma_show_error(host); 2899 host->data->error = -EIO; 2900 if (host->ops->adma_workaround) 2901 host->ops->adma_workaround(host, intmask); 2902 } 2903 2904 if (host->data->error) 2905 sdhci_finish_data(host); 2906 else { 2907 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 2908 sdhci_transfer_pio(host); 2909 2910 /* 2911 * We currently don't do anything fancy with DMA 2912 * boundaries, but as we can't disable the feature 2913 * we need to at least restart the transfer. 2914 * 2915 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 2916 * should return a valid address to continue from, but as 2917 * some controllers are faulty, don't trust them. 2918 */ 2919 if (intmask & SDHCI_INT_DMA_END) { 2920 dma_addr_t dmastart, dmanow; 2921 2922 dmastart = sdhci_sdma_address(host); 2923 dmanow = dmastart + host->data->bytes_xfered; 2924 /* 2925 * Force update to the next DMA block boundary. 2926 */ 2927 dmanow = (dmanow & 2928 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 2929 SDHCI_DEFAULT_BOUNDARY_SIZE; 2930 host->data->bytes_xfered = dmanow - dmastart; 2931 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", 2932 &dmastart, host->data->bytes_xfered, &dmanow); 2933 sdhci_set_sdma_addr(host, dmanow); 2934 } 2935 2936 if (intmask & SDHCI_INT_DATA_END) { 2937 if (host->cmd == host->data_cmd) { 2938 /* 2939 * Data managed to finish before the 2940 * command completed. Make sure we do 2941 * things in the proper order. 2942 */ 2943 host->data_early = 1; 2944 } else { 2945 sdhci_finish_data(host); 2946 } 2947 } 2948 } 2949 } 2950 2951 static irqreturn_t sdhci_irq(int irq, void *dev_id) 2952 { 2953 irqreturn_t result = IRQ_NONE; 2954 struct sdhci_host *host = dev_id; 2955 u32 intmask, mask, unexpected = 0; 2956 int max_loops = 16; 2957 2958 spin_lock(&host->lock); 2959 2960 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) { 2961 spin_unlock(&host->lock); 2962 return IRQ_NONE; 2963 } 2964 2965 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 2966 if (!intmask || intmask == 0xffffffff) { 2967 result = IRQ_NONE; 2968 goto out; 2969 } 2970 2971 do { 2972 DBG("IRQ status 0x%08x\n", intmask); 2973 2974 if (host->ops->irq) { 2975 intmask = host->ops->irq(host, intmask); 2976 if (!intmask) 2977 goto cont; 2978 } 2979 2980 /* Clear selected interrupts. */ 2981 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 2982 SDHCI_INT_BUS_POWER); 2983 sdhci_writel(host, mask, SDHCI_INT_STATUS); 2984 2985 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 2986 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 2987 SDHCI_CARD_PRESENT; 2988 2989 /* 2990 * There is a observation on i.mx esdhc. INSERT 2991 * bit will be immediately set again when it gets 2992 * cleared, if a card is inserted. 
We have to mask 2993 * the irq to prevent an interrupt storm which will 2994 * freeze the system. The REMOVE bit gets into the 2995 * same situation. 2996 * 2997 * More testing is needed here to ensure it works 2998 * for other platforms though. 2999 */ 3000 host->ier &= ~(SDHCI_INT_CARD_INSERT | 3001 SDHCI_INT_CARD_REMOVE); 3002 host->ier |= present ? SDHCI_INT_CARD_REMOVE : 3003 SDHCI_INT_CARD_INSERT; 3004 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3005 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3006 3007 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 3008 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 3009 3010 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 3011 SDHCI_INT_CARD_REMOVE); 3012 result = IRQ_WAKE_THREAD; 3013 } 3014 3015 if (intmask & SDHCI_INT_CMD_MASK) 3016 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); 3017 3018 if (intmask & SDHCI_INT_DATA_MASK) 3019 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 3020 3021 if (intmask & SDHCI_INT_BUS_POWER) 3022 pr_err("%s: Card is consuming too much power!\n", 3023 mmc_hostname(host->mmc)); 3024 3025 if (intmask & SDHCI_INT_RETUNE) 3026 mmc_retune_needed(host->mmc); 3027 3028 if ((intmask & SDHCI_INT_CARD_INT) && 3029 (host->ier & SDHCI_INT_CARD_INT)) { 3030 sdhci_enable_sdio_irq_nolock(host, false); 3031 host->thread_isr |= SDHCI_INT_CARD_INT; 3032 result = IRQ_WAKE_THREAD; 3033 } 3034 3035 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 3036 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3037 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 3038 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); 3039 3040 if (intmask) { 3041 unexpected |= intmask; 3042 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3043 } 3044 cont: 3045 if (result == IRQ_NONE) 3046 result = IRQ_HANDLED; 3047 3048 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3049 } while (intmask && --max_loops); 3050 out: 3051 spin_unlock(&host->lock); 3052 3053 if (unexpected) { 3054 pr_err("%s: Unexpected interrupt 0x%08x.\n", 3055 mmc_hostname(host->mmc), unexpected); 3056 sdhci_dumpregs(host); 3057 } 3058 3059 return result; 3060 } 3061 3062 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3063 { 3064 struct sdhci_host *host = dev_id; 3065 unsigned long flags; 3066 u32 isr; 3067 3068 spin_lock_irqsave(&host->lock, flags); 3069 isr = host->thread_isr; 3070 host->thread_isr = 0; 3071 spin_unlock_irqrestore(&host->lock, flags); 3072 3073 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3074 struct mmc_host *mmc = host->mmc; 3075 3076 mmc->ops->card_event(mmc); 3077 mmc_detect_change(mmc, msecs_to_jiffies(200)); 3078 } 3079 3080 if (isr & SDHCI_INT_CARD_INT) { 3081 sdio_run_irqs(host->mmc); 3082 3083 spin_lock_irqsave(&host->lock, flags); 3084 if (host->flags & SDHCI_SDIO_IRQ_ENABLED) 3085 sdhci_enable_sdio_irq_nolock(host, true); 3086 spin_unlock_irqrestore(&host->lock, flags); 3087 } 3088 3089 return isr ? IRQ_HANDLED : IRQ_NONE; 3090 } 3091 3092 /*****************************************************************************\ 3093 * * 3094 * Suspend/resume * 3095 * * 3096 \*****************************************************************************/ 3097 3098 #ifdef CONFIG_PM 3099 3100 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host) 3101 { 3102 return mmc_card_is_removable(host->mmc) && 3103 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3104 !mmc_can_gpio_cd(host->mmc); 3105 } 3106 3107 /* 3108 * To enable wakeup events, the corresponding events have to be enabled in 3109 * the Interrupt Status Enable register too.
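* (sdhci_enable_irq_wakeups() below therefore programs both SDHCI_WAKE_UP_CONTROL and SDHCI_INT_ENABLE with matching wake-up and interrupt bits.)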
See 'Table 1-6: Wakeup Signal 3110 * Table' in the SD Host Controller Standard Specification. 3111 * It is useless to restore SDHCI_INT_ENABLE state in 3112 * sdhci_disable_irq_wakeups() since it will be set by 3113 * sdhci_enable_card_detection() or sdhci_init(). 3114 */ 3115 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) 3116 { 3117 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | 3118 SDHCI_WAKE_ON_INT; 3119 u32 irq_val = 0; 3120 u8 wake_val = 0; 3121 u8 val; 3122 3123 if (sdhci_cd_irq_can_wakeup(host)) { 3124 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; 3125 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; 3126 } 3127 3128 if (mmc_card_wake_sdio_irq(host->mmc)) { 3129 wake_val |= SDHCI_WAKE_ON_INT; 3130 irq_val |= SDHCI_INT_CARD_INT; 3131 } 3132 3133 if (!irq_val) 3134 return false; 3135 3136 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3137 val &= ~mask; 3138 val |= wake_val; 3139 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3140 3141 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); 3142 3143 host->irq_wake_enabled = !enable_irq_wake(host->irq); 3144 3145 return host->irq_wake_enabled; 3146 } 3147 3148 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 3149 { 3150 u8 val; 3151 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 3152 | SDHCI_WAKE_ON_INT; 3153 3154 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3155 val &= ~mask; 3156 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3157 3158 disable_irq_wake(host->irq); 3159 3160 host->irq_wake_enabled = false; 3161 } 3162 3163 int sdhci_suspend_host(struct sdhci_host *host) 3164 { 3165 sdhci_disable_card_detection(host); 3166 3167 mmc_retune_timer_stop(host->mmc); 3168 3169 if (!device_may_wakeup(mmc_dev(host->mmc)) || 3170 !sdhci_enable_irq_wakeups(host)) { 3171 host->ier = 0; 3172 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3173 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3174 free_irq(host->irq, host); 3175 } 3176 3177 return 0; 3178 } 3179 3180 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 3181 3182 int sdhci_resume_host(struct sdhci_host *host) 3183 { 3184 struct mmc_host *mmc = host->mmc; 3185 int ret = 0; 3186 3187 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3188 if (host->ops->enable_dma) 3189 host->ops->enable_dma(host); 3190 } 3191 3192 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && 3193 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 3194 /* Card keeps power but host controller does not */ 3195 sdhci_init(host, 0); 3196 host->pwr = 0; 3197 host->clock = 0; 3198 mmc->ops->set_ios(mmc, &mmc->ios); 3199 } else { 3200 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); 3201 mmiowb(); 3202 } 3203 3204 if (host->irq_wake_enabled) { 3205 sdhci_disable_irq_wakeups(host); 3206 } else { 3207 ret = request_threaded_irq(host->irq, sdhci_irq, 3208 sdhci_thread_irq, IRQF_SHARED, 3209 mmc_hostname(host->mmc), host); 3210 if (ret) 3211 return ret; 3212 } 3213 3214 sdhci_enable_card_detection(host); 3215 3216 return ret; 3217 } 3218 3219 EXPORT_SYMBOL_GPL(sdhci_resume_host); 3220 3221 int sdhci_runtime_suspend_host(struct sdhci_host *host) 3222 { 3223 unsigned long flags; 3224 3225 mmc_retune_timer_stop(host->mmc); 3226 3227 spin_lock_irqsave(&host->lock, flags); 3228 host->ier &= SDHCI_INT_CARD_INT; 3229 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3230 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3231 spin_unlock_irqrestore(&host->lock, flags); 3232 3233 synchronize_hardirq(host->irq); 3234 3235 spin_lock_irqsave(&host->lock, flags); 3236 
host->runtime_suspended = true; 3237 spin_unlock_irqrestore(&host->lock, flags); 3238 3239 return 0; 3240 } 3241 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 3242 3243 int sdhci_runtime_resume_host(struct sdhci_host *host) 3244 { 3245 struct mmc_host *mmc = host->mmc; 3246 unsigned long flags; 3247 int host_flags = host->flags; 3248 3249 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3250 if (host->ops->enable_dma) 3251 host->ops->enable_dma(host); 3252 } 3253 3254 sdhci_init(host, 0); 3255 3256 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && 3257 mmc->ios.power_mode != MMC_POWER_OFF) { 3258 /* Force clock and power re-program */ 3259 host->pwr = 0; 3260 host->clock = 0; 3261 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); 3262 mmc->ops->set_ios(mmc, &mmc->ios); 3263 3264 if ((host_flags & SDHCI_PV_ENABLED) && 3265 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { 3266 spin_lock_irqsave(&host->lock, flags); 3267 sdhci_enable_preset_value(host, true); 3268 spin_unlock_irqrestore(&host->lock, flags); 3269 } 3270 3271 if ((mmc->caps2 & MMC_CAP2_HS400_ES) && 3272 mmc->ops->hs400_enhanced_strobe) 3273 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); 3274 } 3275 3276 spin_lock_irqsave(&host->lock, flags); 3277 3278 host->runtime_suspended = false; 3279 3280 /* Enable SDIO IRQ */ 3281 if (host->flags & SDHCI_SDIO_IRQ_ENABLED) 3282 sdhci_enable_sdio_irq_nolock(host, true); 3283 3284 /* Enable Card Detection */ 3285 sdhci_enable_card_detection(host); 3286 3287 spin_unlock_irqrestore(&host->lock, flags); 3288 3289 return 0; 3290 } 3291 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 3292 3293 #endif /* CONFIG_PM */ 3294 3295 /*****************************************************************************\ 3296 * * 3297 * Command Queue Engine (CQE) helpers * 3298 * * 3299 \*****************************************************************************/ 3300 3301 void sdhci_cqe_enable(struct mmc_host *mmc) 3302 { 3303 struct sdhci_host *host = mmc_priv(mmc); 3304 unsigned long flags; 3305 u8 ctrl; 3306 3307 spin_lock_irqsave(&host->lock, flags); 3308 3309 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 3310 ctrl &= ~SDHCI_CTRL_DMA_MASK; 3311 if (host->flags & SDHCI_USE_64_BIT_DMA) 3312 ctrl |= SDHCI_CTRL_ADMA64; 3313 else 3314 ctrl |= SDHCI_CTRL_ADMA32; 3315 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 3316 3317 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), 3318 SDHCI_BLOCK_SIZE); 3319 3320 /* Set maximum timeout */ 3321 sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL); 3322 3323 host->ier = host->cqe_ier; 3324 3325 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3326 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3327 3328 host->cqe_on = true; 3329 3330 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", 3331 mmc_hostname(mmc), host->ier, 3332 sdhci_readl(host, SDHCI_INT_STATUS)); 3333 3334 mmiowb(); 3335 spin_unlock_irqrestore(&host->lock, flags); 3336 } 3337 EXPORT_SYMBOL_GPL(sdhci_cqe_enable); 3338 3339 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) 3340 { 3341 struct sdhci_host *host = mmc_priv(mmc); 3342 unsigned long flags; 3343 3344 spin_lock_irqsave(&host->lock, flags); 3345 3346 sdhci_set_default_irqs(host); 3347 3348 host->cqe_on = false; 3349 3350 if (recovery) { 3351 sdhci_do_reset(host, SDHCI_RESET_CMD); 3352 sdhci_do_reset(host, SDHCI_RESET_DATA); 3353 } 3354 3355 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", 3356 mmc_hostname(mmc), host->ier, 3357 sdhci_readl(host, SDHCI_INT_STATUS)); 3358 3359 mmiowb(); 3360 
spin_unlock_irqrestore(&host->lock, flags); 3361 } 3362 EXPORT_SYMBOL_GPL(sdhci_cqe_disable); 3363 3364 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, 3365 int *data_error) 3366 { 3367 u32 mask; 3368 3369 if (!host->cqe_on) 3370 return false; 3371 3372 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) 3373 *cmd_error = -EILSEQ; 3374 else if (intmask & SDHCI_INT_TIMEOUT) 3375 *cmd_error = -ETIMEDOUT; 3376 else 3377 *cmd_error = 0; 3378 3379 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) 3380 *data_error = -EILSEQ; 3381 else if (intmask & SDHCI_INT_DATA_TIMEOUT) 3382 *data_error = -ETIMEDOUT; 3383 else if (intmask & SDHCI_INT_ADMA_ERROR) 3384 *data_error = -EIO; 3385 else 3386 *data_error = 0; 3387 3388 /* Clear selected interrupts. */ 3389 mask = intmask & host->cqe_ier; 3390 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3391 3392 if (intmask & SDHCI_INT_BUS_POWER) 3393 pr_err("%s: Card is consuming too much power!\n", 3394 mmc_hostname(host->mmc)); 3395 3396 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR); 3397 if (intmask) { 3398 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3399 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n", 3400 mmc_hostname(host->mmc), intmask); 3401 sdhci_dumpregs(host); 3402 } 3403 3404 return true; 3405 } 3406 EXPORT_SYMBOL_GPL(sdhci_cqe_irq); 3407 3408 /*****************************************************************************\ 3409 * * 3410 * Device allocation/registration * 3411 * * 3412 \*****************************************************************************/ 3413 3414 struct sdhci_host *sdhci_alloc_host(struct device *dev, 3415 size_t priv_size) 3416 { 3417 struct mmc_host *mmc; 3418 struct sdhci_host *host; 3419 3420 WARN_ON(dev == NULL); 3421 3422 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 3423 if (!mmc) 3424 return ERR_PTR(-ENOMEM); 3425 3426 host = mmc_priv(mmc); 3427 host->mmc = mmc; 3428 host->mmc_host_ops = sdhci_ops; 3429 mmc->ops = &host->mmc_host_ops; 3430 3431 host->flags = SDHCI_SIGNALING_330; 3432 3433 host->cqe_ier = SDHCI_CQE_INT_MASK; 3434 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK; 3435 3436 host->tuning_delay = -1; 3437 3438 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; 3439 3440 /* 3441 * The DMA table descriptor count is calculated as the maximum 3442 * number of segments times 2, to allow for an alignment 3443 * descriptor for each segment, plus 1 for a nop end descriptor. 
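* As a rough worked example, assuming SDHCI_MAX_SEGS is 128, that comes to 128 * 2 + 1 = 257 descriptor entries per table.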
3444 */ 3445 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; 3446 3447 return host; 3448 } 3449 3450 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 3451 3452 static int sdhci_set_dma_mask(struct sdhci_host *host) 3453 { 3454 struct mmc_host *mmc = host->mmc; 3455 struct device *dev = mmc_dev(mmc); 3456 int ret = -EINVAL; 3457 3458 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 3459 host->flags &= ~SDHCI_USE_64_BIT_DMA; 3460 3461 /* Try 64-bit mask if hardware is capable of it */ 3462 if (host->flags & SDHCI_USE_64_BIT_DMA) { 3463 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 3464 if (ret) { 3465 pr_warn("%s: Failed to set 64-bit DMA mask.\n", 3466 mmc_hostname(mmc)); 3467 host->flags &= ~SDHCI_USE_64_BIT_DMA; 3468 } 3469 } 3470 3471 /* 32-bit mask as default & fallback */ 3472 if (ret) { 3473 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 3474 if (ret) 3475 pr_warn("%s: Failed to set 32-bit DMA mask.\n", 3476 mmc_hostname(mmc)); 3477 } 3478 3479 return ret; 3480 } 3481 3482 void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1) 3483 { 3484 u16 v; 3485 u64 dt_caps_mask = 0; 3486 u64 dt_caps = 0; 3487 3488 if (host->read_caps) 3489 return; 3490 3491 host->read_caps = true; 3492 3493 if (debug_quirks) 3494 host->quirks = debug_quirks; 3495 3496 if (debug_quirks2) 3497 host->quirks2 = debug_quirks2; 3498 3499 sdhci_do_reset(host, SDHCI_RESET_ALL); 3500 3501 if (host->v4_mode) 3502 sdhci_do_enable_v4_mode(host); 3503 3504 of_property_read_u64(mmc_dev(host->mmc)->of_node, 3505 "sdhci-caps-mask", &dt_caps_mask); 3506 of_property_read_u64(mmc_dev(host->mmc)->of_node, 3507 "sdhci-caps", &dt_caps); 3508 3509 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION); 3510 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; 3511 3512 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS) 3513 return; 3514 3515 if (caps) { 3516 host->caps = *caps; 3517 } else { 3518 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); 3519 host->caps &= ~lower_32_bits(dt_caps_mask); 3520 host->caps |= lower_32_bits(dt_caps); 3521 } 3522 3523 if (host->version < SDHCI_SPEC_300) 3524 return; 3525 3526 if (caps1) { 3527 host->caps1 = *caps1; 3528 } else { 3529 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); 3530 host->caps1 &= ~upper_32_bits(dt_caps_mask); 3531 host->caps1 |= upper_32_bits(dt_caps); 3532 } 3533 } 3534 EXPORT_SYMBOL_GPL(__sdhci_read_caps); 3535 3536 static int sdhci_allocate_bounce_buffer(struct sdhci_host *host) 3537 { 3538 struct mmc_host *mmc = host->mmc; 3539 unsigned int max_blocks; 3540 unsigned int bounce_size; 3541 int ret; 3542 3543 /* 3544 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer 3545 * has diminishing returns, this is probably because SD/MMC 3546 * cards are usually optimized to handle this size of requests. 3547 */ 3548 bounce_size = SZ_64K; 3549 /* 3550 * Adjust downwards to maximum request size if this is less 3551 * than our segment size, else hammer down the maximum 3552 * request size to the maximum buffer size. 3553 */ 3554 if (mmc->max_req_size < bounce_size) 3555 bounce_size = mmc->max_req_size; 3556 max_blocks = bounce_size / 512; 3557 3558 /* 3559 * When we just support one segment, we can get significant 3560 * speedups by the help of a bounce buffer to group scattered 3561 * reads/writes together. 
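* For example, with the 64KB buffer allocated below, a host limited to a single SDMA segment can still advertise up to 128 segments (64KB / 512-byte blocks), because the scattered pages are staged through one contiguous DMA area.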
3562 */ 3563 host->bounce_buffer = devm_kmalloc(mmc->parent, 3564 bounce_size, 3565 GFP_KERNEL); 3566 if (!host->bounce_buffer) { 3567 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", 3568 mmc_hostname(mmc), 3569 bounce_size); 3570 /* 3571 * Exiting with zero here makes sure we proceed with 3572 * mmc->max_segs == 1. 3573 */ 3574 return 0; 3575 } 3576 3577 host->bounce_addr = dma_map_single(mmc->parent, 3578 host->bounce_buffer, 3579 bounce_size, 3580 DMA_BIDIRECTIONAL); 3581 ret = dma_mapping_error(mmc->parent, host->bounce_addr); 3582 if (ret) 3583 /* Again fall back to max_segs == 1 */ 3584 return 0; 3585 host->bounce_buffer_size = bounce_size; 3586 3587 /* Lie about this since we're bouncing */ 3588 mmc->max_segs = max_blocks; 3589 mmc->max_seg_size = bounce_size; 3590 mmc->max_req_size = bounce_size; 3591 3592 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n", 3593 mmc_hostname(mmc), max_blocks, bounce_size); 3594 3595 return 0; 3596 } 3597 3598 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host) 3599 { 3600 /* 3601 * According to SD Host Controller spec v4.10, bit[27] added from 3602 * version 4.10 in Capabilities Register is used as 64-bit System 3603 * Address support for V4 mode. 3604 */ 3605 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) 3606 return host->caps & SDHCI_CAN_64BIT_V4; 3607 3608 return host->caps & SDHCI_CAN_64BIT; 3609 } 3610 3611 int sdhci_setup_host(struct sdhci_host *host) 3612 { 3613 struct mmc_host *mmc; 3614 u32 max_current_caps; 3615 unsigned int ocr_avail; 3616 unsigned int override_timeout_clk; 3617 u32 max_clk; 3618 int ret; 3619 3620 WARN_ON(host == NULL); 3621 if (host == NULL) 3622 return -EINVAL; 3623 3624 mmc = host->mmc; 3625 3626 /* 3627 * If there are external regulators, get them. Note this must be done 3628 * early before resetting the host and reading the capabilities so that 3629 * the host can take the appropriate action if regulators are not 3630 * available. 3631 */ 3632 ret = mmc_regulator_get_supply(mmc); 3633 if (ret) 3634 return ret; 3635 3636 DBG("Version: 0x%08x | Present: 0x%08x\n", 3637 sdhci_readw(host, SDHCI_HOST_VERSION), 3638 sdhci_readl(host, SDHCI_PRESENT_STATE)); 3639 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", 3640 sdhci_readl(host, SDHCI_CAPABILITIES), 3641 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 3642 3643 sdhci_read_caps(host); 3644 3645 override_timeout_clk = host->timeout_clk; 3646 3647 if (host->version > SDHCI_SPEC_420) { 3648 pr_err("%s: Unknown controller version (%d). You may experience problems.\n", 3649 mmc_hostname(mmc), host->version); 3650 } 3651 3652 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 3653 host->flags |= SDHCI_USE_SDMA; 3654 else if (!(host->caps & SDHCI_CAN_DO_SDMA)) 3655 DBG("Controller doesn't have SDMA capability\n"); 3656 else 3657 host->flags |= SDHCI_USE_SDMA; 3658 3659 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 3660 (host->flags & SDHCI_USE_SDMA)) { 3661 DBG("Disabling DMA as it is marked broken\n"); 3662 host->flags &= ~SDHCI_USE_SDMA; 3663 } 3664 3665 if ((host->version >= SDHCI_SPEC_200) && 3666 (host->caps & SDHCI_CAN_DO_ADMA2)) 3667 host->flags |= SDHCI_USE_ADMA; 3668 3669 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 3670 (host->flags & SDHCI_USE_ADMA)) { 3671 DBG("Disabling ADMA as it is marked broken\n"); 3672 host->flags &= ~SDHCI_USE_ADMA; 3673 } 3674 3675 /* 3676 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask 3677 * and *must* do 64-bit DMA. 
A driver has the opportunity to change 3678 * that during the first call to ->enable_dma(). Similarly 3679 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to 3680 * implement. 3681 */ 3682 if (sdhci_can_64bit_dma(host)) 3683 host->flags |= SDHCI_USE_64_BIT_DMA; 3684 3685 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3686 ret = sdhci_set_dma_mask(host); 3687 3688 if (!ret && host->ops->enable_dma) 3689 ret = host->ops->enable_dma(host); 3690 3691 if (ret) { 3692 pr_warn("%s: No suitable DMA available - falling back to PIO\n", 3693 mmc_hostname(mmc)); 3694 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 3695 3696 ret = 0; 3697 } 3698 } 3699 3700 /* SDMA does not support 64-bit DMA if v4 mode not set */ 3701 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode) 3702 host->flags &= ~SDHCI_USE_SDMA; 3703 3704 if (host->flags & SDHCI_USE_ADMA) { 3705 dma_addr_t dma; 3706 void *buf; 3707 3708 if (host->flags & SDHCI_USE_64_BIT_DMA) { 3709 host->adma_table_sz = host->adma_table_cnt * 3710 SDHCI_ADMA2_64_DESC_SZ(host); 3711 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); 3712 } else { 3713 host->adma_table_sz = host->adma_table_cnt * 3714 SDHCI_ADMA2_32_DESC_SZ; 3715 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; 3716 } 3717 3718 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; 3719 /* 3720 * Use zalloc to zero the reserved high 32-bits of 128-bit 3721 * descriptors so that they never need to be written. 3722 */ 3723 buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz + 3724 host->adma_table_sz, &dma, GFP_KERNEL); 3725 if (!buf) { 3726 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 3727 mmc_hostname(mmc)); 3728 host->flags &= ~SDHCI_USE_ADMA; 3729 } else if ((dma + host->align_buffer_sz) & 3730 (SDHCI_ADMA2_DESC_ALIGN - 1)) { 3731 pr_warn("%s: unable to allocate aligned ADMA descriptor\n", 3732 mmc_hostname(mmc)); 3733 host->flags &= ~SDHCI_USE_ADMA; 3734 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 3735 host->adma_table_sz, buf, dma); 3736 } else { 3737 host->align_buffer = buf; 3738 host->align_addr = dma; 3739 3740 host->adma_table = buf + host->align_buffer_sz; 3741 host->adma_addr = dma + host->align_buffer_sz; 3742 } 3743 } 3744 3745 /* 3746 * If we use DMA, then it's up to the caller to set the DMA 3747 * mask, but PIO does not need the hw shim so we set a new 3748 * mask here in that case. 3749 */ 3750 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { 3751 host->dma_mask = DMA_BIT_MASK(64); 3752 mmc_dev(mmc)->dma_mask = &host->dma_mask; 3753 } 3754 3755 if (host->version >= SDHCI_SPEC_300) 3756 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK) 3757 >> SDHCI_CLOCK_BASE_SHIFT; 3758 else 3759 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK) 3760 >> SDHCI_CLOCK_BASE_SHIFT; 3761 3762 host->max_clk *= 1000000; 3763 if (host->max_clk == 0 || host->quirks & 3764 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 3765 if (!host->ops->get_max_clock) { 3766 pr_err("%s: Hardware doesn't specify base clock frequency.\n", 3767 mmc_hostname(mmc)); 3768 ret = -ENODEV; 3769 goto undma; 3770 } 3771 host->max_clk = host->ops->get_max_clock(host); 3772 } 3773 3774 /* 3775 * In case of Host Controller v3.00, find out whether clock 3776 * multiplier is supported. 
3777 */ 3778 host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >> 3779 SDHCI_CLOCK_MUL_SHIFT; 3780 3781 /* 3782 * In case the value in Clock Multiplier is 0, then programmable 3783 * clock mode is not supported, otherwise the actual clock 3784 * multiplier is one more than the value of Clock Multiplier 3785 * in the Capabilities Register. 3786 */ 3787 if (host->clk_mul) 3788 host->clk_mul += 1; 3789 3790 /* 3791 * Set host parameters. 3792 */ 3793 max_clk = host->max_clk; 3794 3795 if (host->ops->get_min_clock) 3796 mmc->f_min = host->ops->get_min_clock(host); 3797 else if (host->version >= SDHCI_SPEC_300) { 3798 if (host->clk_mul) { 3799 mmc->f_min = (host->max_clk * host->clk_mul) / 1024; 3800 max_clk = host->max_clk * host->clk_mul; 3801 } else 3802 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 3803 } else 3804 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 3805 3806 if (!mmc->f_max || mmc->f_max > max_clk) 3807 mmc->f_max = max_clk; 3808 3809 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 3810 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >> 3811 SDHCI_TIMEOUT_CLK_SHIFT; 3812 3813 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) 3814 host->timeout_clk *= 1000; 3815 3816 if (host->timeout_clk == 0) { 3817 if (!host->ops->get_timeout_clock) { 3818 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n", 3819 mmc_hostname(mmc)); 3820 ret = -ENODEV; 3821 goto undma; 3822 } 3823 3824 host->timeout_clk = 3825 DIV_ROUND_UP(host->ops->get_timeout_clock(host), 3826 1000); 3827 } 3828 3829 if (override_timeout_clk) 3830 host->timeout_clk = override_timeout_clk; 3831 3832 mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 3833 host->ops->get_max_timeout_count(host) : 1 << 27; 3834 mmc->max_busy_timeout /= host->timeout_clk; 3835 } 3836 3837 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT && 3838 !host->ops->get_max_timeout_count) 3839 mmc->max_busy_timeout = 0; 3840 3841 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; 3842 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 3843 3844 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 3845 host->flags |= SDHCI_AUTO_CMD12; 3846 3847 /* 3848 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO. 3849 * For v4 mode, SDMA may use Auto-CMD23 as well. 3850 */ 3851 if ((host->version >= SDHCI_SPEC_300) && 3852 ((host->flags & SDHCI_USE_ADMA) || 3853 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) && 3854 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { 3855 host->flags |= SDHCI_AUTO_CMD23; 3856 DBG("Auto-CMD23 available\n"); 3857 } else { 3858 DBG("Auto-CMD23 unavailable\n"); 3859 } 3860 3861 /* 3862 * A controller may support 8-bit width, but the board itself 3863 * might not have the pins brought out. Boards that support 3864 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 3865 * their platform code before calling sdhci_add_host(), and we 3866 * won't assume 8-bit width for hosts without that CAP. 
3867 */ 3868 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 3869 mmc->caps |= MMC_CAP_4_BIT_DATA; 3870 3871 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) 3872 mmc->caps &= ~MMC_CAP_CMD23; 3873 3874 if (host->caps & SDHCI_CAN_DO_HISPD) 3875 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 3876 3877 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3878 mmc_card_is_removable(mmc) && 3879 mmc_gpio_get_cd(host->mmc) < 0) 3880 mmc->caps |= MMC_CAP_NEEDS_POLL; 3881 3882 if (!IS_ERR(mmc->supply.vqmmc)) { 3883 ret = regulator_enable(mmc->supply.vqmmc); 3884 3885 /* If vqmmc provides no 1.8V signalling, then there's no UHS */ 3886 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, 3887 1950000)) 3888 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | 3889 SDHCI_SUPPORT_SDR50 | 3890 SDHCI_SUPPORT_DDR50); 3891 3892 /* In the eMMC case vqmmc might be a fixed 1.8V regulator */ 3893 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000, 3894 3600000)) 3895 host->flags &= ~SDHCI_SIGNALING_330; 3896 3897 if (ret) { 3898 pr_warn("%s: Failed to enable vqmmc regulator: %d\n", 3899 mmc_hostname(mmc), ret); 3900 mmc->supply.vqmmc = ERR_PTR(-EINVAL); 3901 } 3902 } 3903 3904 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) { 3905 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 3906 SDHCI_SUPPORT_DDR50); 3907 /* 3908 * The SDHCI controller in a SoC might support HS200/HS400 3909 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property), 3910 * but if the board is modeled such that the IO lines are not 3911 * connected to 1.8v then HS200/HS400 cannot be supported. 3912 * Disable HS200/HS400 if the board does not have 1.8v connected 3913 * to the IO lines. (Applicable for other modes in 1.8v) 3914 */ 3915 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES); 3916 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS); 3917 } 3918 3919 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ 3920 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 3921 SDHCI_SUPPORT_DDR50)) 3922 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; 3923 3924 /* SDR104 support also implies SDR50 support */ 3925 if (host->caps1 & SDHCI_SUPPORT_SDR104) { 3926 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; 3927 /* SD3.0: SDR104 is supported so (for eMMC) the caps2 3928 * field can be promoted to support HS200. 3929 */ 3930 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) 3931 mmc->caps2 |= MMC_CAP2_HS200; 3932 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) { 3933 mmc->caps |= MMC_CAP_UHS_SDR50; 3934 } 3935 3936 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 && 3937 (host->caps1 & SDHCI_SUPPORT_HS400)) 3938 mmc->caps2 |= MMC_CAP2_HS400; 3939 3940 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) && 3941 (IS_ERR(mmc->supply.vqmmc) || 3942 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000, 3943 1300000))) 3944 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V; 3945 3946 if ((host->caps1 & SDHCI_SUPPORT_DDR50) && 3947 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) 3948 mmc->caps |= MMC_CAP_UHS_DDR50; 3949 3950 /* Does the host need tuning for SDR50?
*/ 3951 if (host->caps1 & SDHCI_USE_SDR50_TUNING) 3952 host->flags |= SDHCI_SDR50_NEEDS_TUNING; 3953 3954 /* Driver Type(s) (A, C, D) supported by the host */ 3955 if (host->caps1 & SDHCI_DRIVER_TYPE_A) 3956 mmc->caps |= MMC_CAP_DRIVER_TYPE_A; 3957 if (host->caps1 & SDHCI_DRIVER_TYPE_C) 3958 mmc->caps |= MMC_CAP_DRIVER_TYPE_C; 3959 if (host->caps1 & SDHCI_DRIVER_TYPE_D) 3960 mmc->caps |= MMC_CAP_DRIVER_TYPE_D; 3961 3962 /* Initial value for re-tuning timer count */ 3963 host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >> 3964 SDHCI_RETUNING_TIMER_COUNT_SHIFT; 3965 3966 /* 3967 * In case Re-tuning Timer is not disabled, the actual value of 3968 * re-tuning timer will be 2 ^ (n - 1). 3969 */ 3970 if (host->tuning_count) 3971 host->tuning_count = 1 << (host->tuning_count - 1); 3972 3973 /* Re-tuning mode supported by the Host Controller */ 3974 host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >> 3975 SDHCI_RETUNING_MODE_SHIFT; 3976 3977 ocr_avail = 0; 3978 3979 /* 3980 * According to SD Host Controller spec v3.00, if the Host System 3981 * can afford more than 150mA, Host Driver should set XPC to 1. Also 3982 * the value is meaningful only if Voltage Support in the Capabilities 3983 * register is set. The actual current value is 4 times the register 3984 * value. 3985 */ 3986 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); 3987 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) { 3988 int curr = regulator_get_current_limit(mmc->supply.vmmc); 3989 if (curr > 0) { 3990 3991 /* convert to SDHCI_MAX_CURRENT format */ 3992 curr = curr/1000; /* convert to mA */ 3993 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; 3994 3995 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); 3996 max_current_caps = 3997 (curr << SDHCI_MAX_CURRENT_330_SHIFT) | 3998 (curr << SDHCI_MAX_CURRENT_300_SHIFT) | 3999 (curr << SDHCI_MAX_CURRENT_180_SHIFT); 4000 } 4001 } 4002 4003 if (host->caps & SDHCI_CAN_VDD_330) { 4004 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; 4005 4006 mmc->max_current_330 = ((max_current_caps & 4007 SDHCI_MAX_CURRENT_330_MASK) >> 4008 SDHCI_MAX_CURRENT_330_SHIFT) * 4009 SDHCI_MAX_CURRENT_MULTIPLIER; 4010 } 4011 if (host->caps & SDHCI_CAN_VDD_300) { 4012 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; 4013 4014 mmc->max_current_300 = ((max_current_caps & 4015 SDHCI_MAX_CURRENT_300_MASK) >> 4016 SDHCI_MAX_CURRENT_300_SHIFT) * 4017 SDHCI_MAX_CURRENT_MULTIPLIER; 4018 } 4019 if (host->caps & SDHCI_CAN_VDD_180) { 4020 ocr_avail |= MMC_VDD_165_195; 4021 4022 mmc->max_current_180 = ((max_current_caps & 4023 SDHCI_MAX_CURRENT_180_MASK) >> 4024 SDHCI_MAX_CURRENT_180_SHIFT) * 4025 SDHCI_MAX_CURRENT_MULTIPLIER; 4026 } 4027 4028 /* If OCR set by host, use it instead. */ 4029 if (host->ocr_mask) 4030 ocr_avail = host->ocr_mask; 4031 4032 /* If OCR set by external regulators, give it highest prio. 

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
					 SDHCI_MAX_CURRENT_330_MASK) >>
					 SDHCI_MAX_CURRENT_330_SHIFT) *
					 SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
					 SDHCI_MAX_CURRENT_300_MASK) >>
					 SDHCI_MAX_CURRENT_300_SHIFT) *
					 SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
					 SDHCI_MAX_CURRENT_180_MASK) >>
					 SDHCI_MAX_CURRENT_180_SHIFT) *
					 SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest priority. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of bytes in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on whether the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
		if (swiotlb_max_segment()) {
			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
						    IO_TLB_SEGSIZE;
			mmc->max_req_size = min(mmc->max_req_size,
						max_req_size);
		}
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				    SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
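
	/*
	 * Illustrative decode of the value computed above: the capabilities
	 * field is an exponent, so 0 -> 512, 1 -> 1024 and 2 -> 2048 byte
	 * blocks; larger values were rejected above and fall back to 512.
	 */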

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ?
		1 : 65535;

	if (mmc->max_segs == 1) {
		/* This may alter mmc->*_blk_* parameters */
		ret = sdhci_allocate_bounce_buffer(host);
		if (ret)
			return ret;
	}

	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
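
/*
 * Illustrative call sequence for a platform glue driver (a sketch only;
 * "pdev", "priv" and "host" are placeholders, not defined in this file):
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(*priv));
 *	...
 *	ret = sdhci_setup_host(host);
 *	if (ret)
 *		return ret;
 *	... fix up host->mmc->caps, clocks, etc. as needed ...
 *	ret = __sdhci_add_host(host);
 *	if (ret)
 *		sdhci_cleanup_host(host);
 *	return ret;
 *
 * Drivers that need no fix-ups between the two steps can simply call
 * sdhci_add_host() below, which runs the same sequence internally.
 */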
"DMA" : "PIO"); 4213 4214 sdhci_enable_card_detection(host); 4215 4216 return 0; 4217 4218 unled: 4219 sdhci_led_unregister(host); 4220 unirq: 4221 sdhci_do_reset(host, SDHCI_RESET_ALL); 4222 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4223 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4224 free_irq(host->irq, host); 4225 untasklet: 4226 tasklet_kill(&host->finish_tasklet); 4227 4228 return ret; 4229 } 4230 EXPORT_SYMBOL_GPL(__sdhci_add_host); 4231 4232 int sdhci_add_host(struct sdhci_host *host) 4233 { 4234 int ret; 4235 4236 ret = sdhci_setup_host(host); 4237 if (ret) 4238 return ret; 4239 4240 ret = __sdhci_add_host(host); 4241 if (ret) 4242 goto cleanup; 4243 4244 return 0; 4245 4246 cleanup: 4247 sdhci_cleanup_host(host); 4248 4249 return ret; 4250 } 4251 EXPORT_SYMBOL_GPL(sdhci_add_host); 4252 4253 void sdhci_remove_host(struct sdhci_host *host, int dead) 4254 { 4255 struct mmc_host *mmc = host->mmc; 4256 unsigned long flags; 4257 4258 if (dead) { 4259 spin_lock_irqsave(&host->lock, flags); 4260 4261 host->flags |= SDHCI_DEVICE_DEAD; 4262 4263 if (sdhci_has_requests(host)) { 4264 pr_err("%s: Controller removed during " 4265 " transfer!\n", mmc_hostname(mmc)); 4266 sdhci_error_out_mrqs(host, -ENOMEDIUM); 4267 } 4268 4269 spin_unlock_irqrestore(&host->lock, flags); 4270 } 4271 4272 sdhci_disable_card_detection(host); 4273 4274 mmc_remove_host(mmc); 4275 4276 sdhci_led_unregister(host); 4277 4278 if (!dead) 4279 sdhci_do_reset(host, SDHCI_RESET_ALL); 4280 4281 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4282 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4283 free_irq(host->irq, host); 4284 4285 del_timer_sync(&host->timer); 4286 del_timer_sync(&host->data_timer); 4287 4288 tasklet_kill(&host->finish_tasklet); 4289 4290 if (!IS_ERR(mmc->supply.vqmmc)) 4291 regulator_disable(mmc->supply.vqmmc); 4292 4293 if (host->align_buffer) 4294 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4295 host->adma_table_sz, host->align_buffer, 4296 host->align_addr); 4297 4298 host->adma_table = NULL; 4299 host->align_buffer = NULL; 4300 } 4301 4302 EXPORT_SYMBOL_GPL(sdhci_remove_host); 4303 4304 void sdhci_free_host(struct sdhci_host *host) 4305 { 4306 mmc_free_host(host->mmc); 4307 } 4308 4309 EXPORT_SYMBOL_GPL(sdhci_free_host); 4310 4311 /*****************************************************************************\ 4312 * * 4313 * Driver init/exit * 4314 * * 4315 \*****************************************************************************/ 4316 4317 static int __init sdhci_drv_init(void) 4318 { 4319 pr_info(DRIVER_NAME 4320 ": Secure Digital Host Controller Interface driver\n"); 4321 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 4322 4323 return 0; 4324 } 4325 4326 static void __exit sdhci_drv_exit(void) 4327 { 4328 } 4329 4330 module_init(sdhci_drv_init); 4331 module_exit(sdhci_drv_exit); 4332 4333 module_param(debug_quirks, uint, 0444); 4334 module_param(debug_quirks2, uint, 0444); 4335 4336 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 4337 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 4338 MODULE_LICENSE("GPL"); 4339 4340 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 4341 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 4342