1 /* 2 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver 3 * 4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or (at 9 * your option) any later version. 10 * 11 * Thanks to the following companies for their support: 12 * 13 * - JMicron (hardware and technical support) 14 */ 15 16 #include <linux/delay.h> 17 #include <linux/highmem.h> 18 #include <linux/io.h> 19 #include <linux/dma-mapping.h> 20 #include <linux/slab.h> 21 #include <linux/scatterlist.h> 22 #include <linux/regulator/consumer.h> 23 24 #include <linux/leds.h> 25 26 #include <linux/mmc/mmc.h> 27 #include <linux/mmc/host.h> 28 29 #include "sdhci.h" 30 31 #define DRIVER_NAME "sdhci" 32 33 #define DBG(f, x...) \ 34 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x) 35 36 #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \ 37 defined(CONFIG_MMC_SDHCI_MODULE)) 38 #define SDHCI_USE_LEDS_CLASS 39 #endif 40 41 static unsigned int debug_quirks = 0; 42 43 static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *); 44 static void sdhci_finish_data(struct sdhci_host *); 45 46 static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); 47 static void sdhci_finish_command(struct sdhci_host *); 48 49 static void sdhci_dumpregs(struct sdhci_host *host) 50 { 51 printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n", 52 mmc_hostname(host->mmc)); 53 54 printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n", 55 sdhci_readl(host, SDHCI_DMA_ADDRESS), 56 sdhci_readw(host, SDHCI_HOST_VERSION)); 57 printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n", 58 sdhci_readw(host, SDHCI_BLOCK_SIZE), 59 sdhci_readw(host, SDHCI_BLOCK_COUNT)); 60 printk(KERN_DEBUG DRIVER_NAME 
": Argument: 0x%08x | Trn mode: 0x%08x\n", 61 sdhci_readl(host, SDHCI_ARGUMENT), 62 sdhci_readw(host, SDHCI_TRANSFER_MODE)); 63 printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n", 64 sdhci_readl(host, SDHCI_PRESENT_STATE), 65 sdhci_readb(host, SDHCI_HOST_CONTROL)); 66 printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n", 67 sdhci_readb(host, SDHCI_POWER_CONTROL), 68 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); 69 printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n", 70 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), 71 sdhci_readw(host, SDHCI_CLOCK_CONTROL)); 72 printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n", 73 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), 74 sdhci_readl(host, SDHCI_INT_STATUS)); 75 printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n", 76 sdhci_readl(host, SDHCI_INT_ENABLE), 77 sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); 78 printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", 79 sdhci_readw(host, SDHCI_ACMD12_ERR), 80 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 81 printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n", 82 sdhci_readl(host, SDHCI_CAPABILITIES), 83 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 84 printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", 85 sdhci_readw(host, SDHCI_COMMAND), 86 sdhci_readl(host, SDHCI_MAX_CURRENT)); 87 88 if (host->flags & SDHCI_USE_ADMA) 89 printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 90 readl(host->ioaddr + SDHCI_ADMA_ERROR), 91 readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); 92 93 printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n"); 94 } 95 96 /*****************************************************************************\ 97 * * 98 * Low level functions * 99 * * 100 \*****************************************************************************/ 101 102 static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set) 103 
{ 104 u32 ier; 105 106 ier = sdhci_readl(host, SDHCI_INT_ENABLE); 107 ier &= ~clear; 108 ier |= set; 109 sdhci_writel(host, ier, SDHCI_INT_ENABLE); 110 sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE); 111 } 112 113 static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs) 114 { 115 sdhci_clear_set_irqs(host, 0, irqs); 116 } 117 118 static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs) 119 { 120 sdhci_clear_set_irqs(host, irqs, 0); 121 } 122 123 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) 124 { 125 u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT; 126 127 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 128 return; 129 130 if (enable) 131 sdhci_unmask_irqs(host, irqs); 132 else 133 sdhci_mask_irqs(host, irqs); 134 } 135 136 static void sdhci_enable_card_detection(struct sdhci_host *host) 137 { 138 sdhci_set_card_detection(host, true); 139 } 140 141 static void sdhci_disable_card_detection(struct sdhci_host *host) 142 { 143 sdhci_set_card_detection(host, false); 144 } 145 146 static void sdhci_reset(struct sdhci_host *host, u8 mask) 147 { 148 unsigned long timeout; 149 u32 uninitialized_var(ier); 150 151 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 152 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & 153 SDHCI_CARD_PRESENT)) 154 return; 155 } 156 157 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 158 ier = sdhci_readl(host, SDHCI_INT_ENABLE); 159 160 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 161 162 if (mask & SDHCI_RESET_ALL) 163 host->clock = 0; 164 165 /* Wait max 100 ms */ 166 timeout = 100; 167 168 /* hw clears the bit when it's done */ 169 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) { 170 if (timeout == 0) { 171 printk(KERN_ERR "%s: Reset 0x%x never completed.\n", 172 mmc_hostname(host->mmc), (int)mask); 173 sdhci_dumpregs(host); 174 return; 175 } 176 timeout--; 177 mdelay(1); 178 } 179 180 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 181 
sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); 182 } 183 184 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); 185 186 static void sdhci_init(struct sdhci_host *host, int soft) 187 { 188 if (soft) 189 sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA); 190 else 191 sdhci_reset(host, SDHCI_RESET_ALL); 192 193 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, 194 SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 195 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX | 196 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | 197 SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE); 198 199 if (soft) { 200 /* force clock reconfiguration */ 201 host->clock = 0; 202 sdhci_set_ios(host->mmc, &host->mmc->ios); 203 } 204 } 205 206 static void sdhci_reinit(struct sdhci_host *host) 207 { 208 sdhci_init(host, 0); 209 sdhci_enable_card_detection(host); 210 } 211 212 static void sdhci_activate_led(struct sdhci_host *host) 213 { 214 u8 ctrl; 215 216 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 217 ctrl |= SDHCI_CTRL_LED; 218 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 219 } 220 221 static void sdhci_deactivate_led(struct sdhci_host *host) 222 { 223 u8 ctrl; 224 225 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 226 ctrl &= ~SDHCI_CTRL_LED; 227 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 228 } 229 230 #ifdef SDHCI_USE_LEDS_CLASS 231 static void sdhci_led_control(struct led_classdev *led, 232 enum led_brightness brightness) 233 { 234 struct sdhci_host *host = container_of(led, struct sdhci_host, led); 235 unsigned long flags; 236 237 spin_lock_irqsave(&host->lock, flags); 238 239 if (brightness == LED_OFF) 240 sdhci_deactivate_led(host); 241 else 242 sdhci_activate_led(host); 243 244 spin_unlock_irqrestore(&host->lock, flags); 245 } 246 #endif 247 248 /*****************************************************************************\ 249 * * 250 * Core functions * 251 * * 252 \*****************************************************************************/ 253 
254 static void sdhci_read_block_pio(struct sdhci_host *host) 255 { 256 unsigned long flags; 257 size_t blksize, len, chunk; 258 u32 uninitialized_var(scratch); 259 u8 *buf; 260 261 DBG("PIO reading\n"); 262 263 blksize = host->data->blksz; 264 chunk = 0; 265 266 local_irq_save(flags); 267 268 while (blksize) { 269 if (!sg_miter_next(&host->sg_miter)) 270 BUG(); 271 272 len = min(host->sg_miter.length, blksize); 273 274 blksize -= len; 275 host->sg_miter.consumed = len; 276 277 buf = host->sg_miter.addr; 278 279 while (len) { 280 if (chunk == 0) { 281 scratch = sdhci_readl(host, SDHCI_BUFFER); 282 chunk = 4; 283 } 284 285 *buf = scratch & 0xFF; 286 287 buf++; 288 scratch >>= 8; 289 chunk--; 290 len--; 291 } 292 } 293 294 sg_miter_stop(&host->sg_miter); 295 296 local_irq_restore(flags); 297 } 298 299 static void sdhci_write_block_pio(struct sdhci_host *host) 300 { 301 unsigned long flags; 302 size_t blksize, len, chunk; 303 u32 scratch; 304 u8 *buf; 305 306 DBG("PIO writing\n"); 307 308 blksize = host->data->blksz; 309 chunk = 0; 310 scratch = 0; 311 312 local_irq_save(flags); 313 314 while (blksize) { 315 if (!sg_miter_next(&host->sg_miter)) 316 BUG(); 317 318 len = min(host->sg_miter.length, blksize); 319 320 blksize -= len; 321 host->sg_miter.consumed = len; 322 323 buf = host->sg_miter.addr; 324 325 while (len) { 326 scratch |= (u32)*buf << (chunk * 8); 327 328 buf++; 329 chunk++; 330 len--; 331 332 if ((chunk == 4) || ((len == 0) && (blksize == 0))) { 333 sdhci_writel(host, scratch, SDHCI_BUFFER); 334 chunk = 0; 335 scratch = 0; 336 } 337 } 338 } 339 340 sg_miter_stop(&host->sg_miter); 341 342 local_irq_restore(flags); 343 } 344 345 static void sdhci_transfer_pio(struct sdhci_host *host) 346 { 347 u32 mask; 348 349 BUG_ON(!host->data); 350 351 if (host->blocks == 0) 352 return; 353 354 if (host->data->flags & MMC_DATA_READ) 355 mask = SDHCI_DATA_AVAILABLE; 356 else 357 mask = SDHCI_SPACE_AVAILABLE; 358 359 /* 360 * Some controllers (JMicron JMB38x) mess up the 
buffer bits 361 * for transfers < 4 bytes. As long as it is just one block, 362 * we can ignore the bits. 363 */ 364 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && 365 (host->data->blocks == 1)) 366 mask = ~0; 367 368 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 369 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) 370 udelay(100); 371 372 if (host->data->flags & MMC_DATA_READ) 373 sdhci_read_block_pio(host); 374 else 375 sdhci_write_block_pio(host); 376 377 host->blocks--; 378 if (host->blocks == 0) 379 break; 380 } 381 382 DBG("PIO transfer complete.\n"); 383 } 384 385 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) 386 { 387 local_irq_save(*flags); 388 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 389 } 390 391 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) 392 { 393 kunmap_atomic(buffer, KM_BIO_SRC_IRQ); 394 local_irq_restore(*flags); 395 } 396 397 static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd) 398 { 399 __le32 *dataddr = (__le32 __force *)(desc + 4); 400 __le16 *cmdlen = (__le16 __force *)desc; 401 402 /* SDHCI specification says ADMA descriptors should be 4 byte 403 * aligned, so using 16 or 32bit operations should be safe. */ 404 405 cmdlen[0] = cpu_to_le16(cmd); 406 cmdlen[1] = cpu_to_le16(len); 407 408 dataddr[0] = cpu_to_le32(addr); 409 } 410 411 static int sdhci_adma_table_pre(struct sdhci_host *host, 412 struct mmc_data *data) 413 { 414 int direction; 415 416 u8 *desc; 417 u8 *align; 418 dma_addr_t addr; 419 dma_addr_t align_addr; 420 int len, offset; 421 422 struct scatterlist *sg; 423 int i; 424 char *buffer; 425 unsigned long flags; 426 427 /* 428 * The spec does not specify endianness of descriptor table. 429 * We currently guess that it is LE. 
430 */ 431 432 if (data->flags & MMC_DATA_READ) 433 direction = DMA_FROM_DEVICE; 434 else 435 direction = DMA_TO_DEVICE; 436 437 /* 438 * The ADMA descriptor table is mapped further down as we 439 * need to fill it with data first. 440 */ 441 442 host->align_addr = dma_map_single(mmc_dev(host->mmc), 443 host->align_buffer, 128 * 4, direction); 444 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) 445 goto fail; 446 BUG_ON(host->align_addr & 0x3); 447 448 host->sg_count = dma_map_sg(mmc_dev(host->mmc), 449 data->sg, data->sg_len, direction); 450 if (host->sg_count == 0) 451 goto unmap_align; 452 453 desc = host->adma_desc; 454 align = host->align_buffer; 455 456 align_addr = host->align_addr; 457 458 for_each_sg(data->sg, sg, host->sg_count, i) { 459 addr = sg_dma_address(sg); 460 len = sg_dma_len(sg); 461 462 /* 463 * The SDHCI specification states that ADMA 464 * addresses must be 32-bit aligned. If they 465 * aren't, then we use a bounce buffer for 466 * the (up to three) bytes that screw up the 467 * alignment. 468 */ 469 offset = (4 - (addr & 0x3)) & 0x3; 470 if (offset) { 471 if (data->flags & MMC_DATA_WRITE) { 472 buffer = sdhci_kmap_atomic(sg, &flags); 473 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); 474 memcpy(align, buffer, offset); 475 sdhci_kunmap_atomic(buffer, &flags); 476 } 477 478 /* tran, valid */ 479 sdhci_set_adma_desc(desc, align_addr, offset, 0x21); 480 481 BUG_ON(offset > 65536); 482 483 align += 4; 484 align_addr += 4; 485 486 desc += 8; 487 488 addr += offset; 489 len -= offset; 490 } 491 492 BUG_ON(len > 65536); 493 494 /* tran, valid */ 495 sdhci_set_adma_desc(desc, addr, len, 0x21); 496 desc += 8; 497 498 /* 499 * If this triggers then we have a calculation bug 500 * somewhere. 
:/ 501 */ 502 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); 503 } 504 505 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { 506 /* 507 * Mark the last descriptor as the terminating descriptor 508 */ 509 if (desc != host->adma_desc) { 510 desc -= 8; 511 desc[0] |= 0x2; /* end */ 512 } 513 } else { 514 /* 515 * Add a terminating entry. 516 */ 517 518 /* nop, end, valid */ 519 sdhci_set_adma_desc(desc, 0, 0, 0x3); 520 } 521 522 /* 523 * Resync align buffer as we might have changed it. 524 */ 525 if (data->flags & MMC_DATA_WRITE) { 526 dma_sync_single_for_device(mmc_dev(host->mmc), 527 host->align_addr, 128 * 4, direction); 528 } 529 530 host->adma_addr = dma_map_single(mmc_dev(host->mmc), 531 host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); 532 if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr)) 533 goto unmap_entries; 534 BUG_ON(host->adma_addr & 0x3); 535 536 return 0; 537 538 unmap_entries: 539 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 540 data->sg_len, direction); 541 unmap_align: 542 dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 543 128 * 4, direction); 544 fail: 545 return -EINVAL; 546 } 547 548 static void sdhci_adma_table_post(struct sdhci_host *host, 549 struct mmc_data *data) 550 { 551 int direction; 552 553 struct scatterlist *sg; 554 int i, size; 555 u8 *align; 556 char *buffer; 557 unsigned long flags; 558 559 if (data->flags & MMC_DATA_READ) 560 direction = DMA_FROM_DEVICE; 561 else 562 direction = DMA_TO_DEVICE; 563 564 dma_unmap_single(mmc_dev(host->mmc), host->adma_addr, 565 (128 * 2 + 1) * 4, DMA_TO_DEVICE); 566 567 dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 568 128 * 4, direction); 569 570 if (data->flags & MMC_DATA_READ) { 571 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, 572 data->sg_len, direction); 573 574 align = host->align_buffer; 575 576 for_each_sg(data->sg, sg, host->sg_count, i) { 577 if (sg_dma_address(sg) & 0x3) { 578 size = 4 - (sg_dma_address(sg) & 0x3); 579 580 buffer = 
sdhci_kmap_atomic(sg, &flags); 581 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); 582 memcpy(buffer, align, size); 583 sdhci_kunmap_atomic(buffer, &flags); 584 585 align += 4; 586 } 587 } 588 } 589 590 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 591 data->sg_len, direction); 592 } 593 594 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) 595 { 596 u8 count; 597 unsigned target_timeout, current_timeout; 598 599 /* 600 * If the host controller provides us with an incorrect timeout 601 * value, just skip the check and use 0xE. The hardware may take 602 * longer to time out, but that's much better than having a too-short 603 * timeout value. 604 */ 605 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) 606 return 0xE; 607 608 /* timeout in us */ 609 target_timeout = data->timeout_ns / 1000 + 610 data->timeout_clks / host->clock; 611 612 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) 613 host->timeout_clk = host->clock / 1000; 614 615 /* 616 * Figure out needed cycles. 617 * We do this in steps in order to fit inside a 32 bit int. 
618 * The first step is the minimum timeout, which will have a 619 * minimum resolution of 6 bits: 620 * (1) 2^13*1000 > 2^22, 621 * (2) host->timeout_clk < 2^16 622 * => 623 * (1) / (2) > 2^6 624 */ 625 count = 0; 626 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 627 while (current_timeout < target_timeout) { 628 count++; 629 current_timeout <<= 1; 630 if (count >= 0xF) 631 break; 632 } 633 634 if (count >= 0xF) { 635 printk(KERN_WARNING "%s: Too large timeout requested!\n", 636 mmc_hostname(host->mmc)); 637 count = 0xE; 638 } 639 640 return count; 641 } 642 643 static void sdhci_set_transfer_irqs(struct sdhci_host *host) 644 { 645 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; 646 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; 647 648 if (host->flags & SDHCI_REQ_USE_DMA) 649 sdhci_clear_set_irqs(host, pio_irqs, dma_irqs); 650 else 651 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs); 652 } 653 654 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) 655 { 656 u8 count; 657 u8 ctrl; 658 int ret; 659 660 WARN_ON(host->data); 661 662 if (data == NULL) 663 return; 664 665 /* Sanity checks */ 666 BUG_ON(data->blksz * data->blocks > 524288); 667 BUG_ON(data->blksz > host->mmc->max_blk_size); 668 BUG_ON(data->blocks > 65535); 669 670 host->data = data; 671 host->data_early = 0; 672 673 count = sdhci_calc_timeout(host, data); 674 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); 675 676 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) 677 host->flags |= SDHCI_REQ_USE_DMA; 678 679 /* 680 * FIXME: This doesn't account for merging when mapping the 681 * scatterlist. 
682 */ 683 if (host->flags & SDHCI_REQ_USE_DMA) { 684 int broken, i; 685 struct scatterlist *sg; 686 687 broken = 0; 688 if (host->flags & SDHCI_USE_ADMA) { 689 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) 690 broken = 1; 691 } else { 692 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) 693 broken = 1; 694 } 695 696 if (unlikely(broken)) { 697 for_each_sg(data->sg, sg, data->sg_len, i) { 698 if (sg->length & 0x3) { 699 DBG("Reverting to PIO because of " 700 "transfer size (%d)\n", 701 sg->length); 702 host->flags &= ~SDHCI_REQ_USE_DMA; 703 break; 704 } 705 } 706 } 707 } 708 709 /* 710 * The assumption here being that alignment is the same after 711 * translation to device address space. 712 */ 713 if (host->flags & SDHCI_REQ_USE_DMA) { 714 int broken, i; 715 struct scatterlist *sg; 716 717 broken = 0; 718 if (host->flags & SDHCI_USE_ADMA) { 719 /* 720 * As we use 3 byte chunks to work around 721 * alignment problems, we need to check this 722 * quirk. 723 */ 724 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) 725 broken = 1; 726 } else { 727 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) 728 broken = 1; 729 } 730 731 if (unlikely(broken)) { 732 for_each_sg(data->sg, sg, data->sg_len, i) { 733 if (sg->offset & 0x3) { 734 DBG("Reverting to PIO because of " 735 "bad alignment\n"); 736 host->flags &= ~SDHCI_REQ_USE_DMA; 737 break; 738 } 739 } 740 } 741 } 742 743 if (host->flags & SDHCI_REQ_USE_DMA) { 744 if (host->flags & SDHCI_USE_ADMA) { 745 ret = sdhci_adma_table_pre(host, data); 746 if (ret) { 747 /* 748 * This only happens when someone fed 749 * us an invalid request. 750 */ 751 WARN_ON(1); 752 host->flags &= ~SDHCI_REQ_USE_DMA; 753 } else { 754 sdhci_writel(host, host->adma_addr, 755 SDHCI_ADMA_ADDRESS); 756 } 757 } else { 758 int sg_cnt; 759 760 sg_cnt = dma_map_sg(mmc_dev(host->mmc), 761 data->sg, data->sg_len, 762 (data->flags & MMC_DATA_READ) ? 
763 DMA_FROM_DEVICE : 764 DMA_TO_DEVICE); 765 if (sg_cnt == 0) { 766 /* 767 * This only happens when someone fed 768 * us an invalid request. 769 */ 770 WARN_ON(1); 771 host->flags &= ~SDHCI_REQ_USE_DMA; 772 } else { 773 WARN_ON(sg_cnt != 1); 774 sdhci_writel(host, sg_dma_address(data->sg), 775 SDHCI_DMA_ADDRESS); 776 } 777 } 778 } 779 780 /* 781 * Always adjust the DMA selection as some controllers 782 * (e.g. JMicron) can't do PIO properly when the selection 783 * is ADMA. 784 */ 785 if (host->version >= SDHCI_SPEC_200) { 786 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 787 ctrl &= ~SDHCI_CTRL_DMA_MASK; 788 if ((host->flags & SDHCI_REQ_USE_DMA) && 789 (host->flags & SDHCI_USE_ADMA)) 790 ctrl |= SDHCI_CTRL_ADMA32; 791 else 792 ctrl |= SDHCI_CTRL_SDMA; 793 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 794 } 795 796 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 797 int flags; 798 799 flags = SG_MITER_ATOMIC; 800 if (host->data->flags & MMC_DATA_READ) 801 flags |= SG_MITER_TO_SG; 802 else 803 flags |= SG_MITER_FROM_SG; 804 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 805 host->blocks = data->blocks; 806 } 807 808 sdhci_set_transfer_irqs(host); 809 810 /* We do not handle DMA boundaries, so set it to max (512 KiB) */ 811 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE); 812 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); 813 } 814 815 static void sdhci_set_transfer_mode(struct sdhci_host *host, 816 struct mmc_data *data) 817 { 818 u16 mode; 819 820 if (data == NULL) 821 return; 822 823 WARN_ON(!host->data); 824 825 mode = SDHCI_TRNS_BLK_CNT_EN; 826 if (data->blocks > 1) { 827 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 828 mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12; 829 else 830 mode |= SDHCI_TRNS_MULTI; 831 } 832 if (data->flags & MMC_DATA_READ) 833 mode |= SDHCI_TRNS_READ; 834 if (host->flags & SDHCI_REQ_USE_DMA) 835 mode |= SDHCI_TRNS_DMA; 836 837 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); 838 } 839 840 
static void sdhci_finish_data(struct sdhci_host *host) 841 { 842 struct mmc_data *data; 843 844 BUG_ON(!host->data); 845 846 data = host->data; 847 host->data = NULL; 848 849 if (host->flags & SDHCI_REQ_USE_DMA) { 850 if (host->flags & SDHCI_USE_ADMA) 851 sdhci_adma_table_post(host, data); 852 else { 853 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 854 data->sg_len, (data->flags & MMC_DATA_READ) ? 855 DMA_FROM_DEVICE : DMA_TO_DEVICE); 856 } 857 } 858 859 /* 860 * The specification states that the block count register must 861 * be updated, but it does not specify at what point in the 862 * data flow. That makes the register entirely useless to read 863 * back so we have to assume that nothing made it to the card 864 * in the event of an error. 865 */ 866 if (data->error) 867 data->bytes_xfered = 0; 868 else 869 data->bytes_xfered = data->blksz * data->blocks; 870 871 if (data->stop) { 872 /* 873 * The controller needs a reset of internal state machines 874 * upon error conditions. 875 */ 876 if (data->error) { 877 sdhci_reset(host, SDHCI_RESET_CMD); 878 sdhci_reset(host, SDHCI_RESET_DATA); 879 } 880 881 sdhci_send_command(host, data->stop); 882 } else 883 tasklet_schedule(&host->finish_tasklet); 884 } 885 886 static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) 887 { 888 int flags; 889 u32 mask; 890 unsigned long timeout; 891 892 WARN_ON(host->cmd); 893 894 /* Wait max 10 ms */ 895 timeout = 10; 896 897 mask = SDHCI_CMD_INHIBIT; 898 if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY)) 899 mask |= SDHCI_DATA_INHIBIT; 900 901 /* We shouldn't wait for data inihibit for stop commands, even 902 though they might use busy signaling */ 903 if (host->mrq->data && (cmd == host->mrq->data->stop)) 904 mask &= ~SDHCI_DATA_INHIBIT; 905 906 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 907 if (timeout == 0) { 908 printk(KERN_ERR "%s: Controller never released " 909 "inhibit bit(s).\n", mmc_hostname(host->mmc)); 910 sdhci_dumpregs(host); 911 
cmd->error = -EIO; 912 tasklet_schedule(&host->finish_tasklet); 913 return; 914 } 915 timeout--; 916 mdelay(1); 917 } 918 919 mod_timer(&host->timer, jiffies + 10 * HZ); 920 921 host->cmd = cmd; 922 923 sdhci_prepare_data(host, cmd->data); 924 925 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); 926 927 sdhci_set_transfer_mode(host, cmd->data); 928 929 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 930 printk(KERN_ERR "%s: Unsupported response type!\n", 931 mmc_hostname(host->mmc)); 932 cmd->error = -EINVAL; 933 tasklet_schedule(&host->finish_tasklet); 934 return; 935 } 936 937 if (!(cmd->flags & MMC_RSP_PRESENT)) 938 flags = SDHCI_CMD_RESP_NONE; 939 else if (cmd->flags & MMC_RSP_136) 940 flags = SDHCI_CMD_RESP_LONG; 941 else if (cmd->flags & MMC_RSP_BUSY) 942 flags = SDHCI_CMD_RESP_SHORT_BUSY; 943 else 944 flags = SDHCI_CMD_RESP_SHORT; 945 946 if (cmd->flags & MMC_RSP_CRC) 947 flags |= SDHCI_CMD_CRC; 948 if (cmd->flags & MMC_RSP_OPCODE) 949 flags |= SDHCI_CMD_INDEX; 950 if (cmd->data) 951 flags |= SDHCI_CMD_DATA; 952 953 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); 954 } 955 956 static void sdhci_finish_command(struct sdhci_host *host) 957 { 958 int i; 959 960 BUG_ON(host->cmd == NULL); 961 962 if (host->cmd->flags & MMC_RSP_PRESENT) { 963 if (host->cmd->flags & MMC_RSP_136) { 964 /* CRC is stripped so we need to do some shifting. 
*/ 965 for (i = 0;i < 4;i++) { 966 host->cmd->resp[i] = sdhci_readl(host, 967 SDHCI_RESPONSE + (3-i)*4) << 8; 968 if (i != 3) 969 host->cmd->resp[i] |= 970 sdhci_readb(host, 971 SDHCI_RESPONSE + (3-i)*4-1); 972 } 973 } else { 974 host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); 975 } 976 } 977 978 host->cmd->error = 0; 979 980 if (host->data && host->data_early) 981 sdhci_finish_data(host); 982 983 if (!host->cmd->data) 984 tasklet_schedule(&host->finish_tasklet); 985 986 host->cmd = NULL; 987 } 988 989 static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 990 { 991 int div; 992 u16 clk; 993 unsigned long timeout; 994 995 if (clock == host->clock) 996 return; 997 998 if (host->ops->set_clock) { 999 host->ops->set_clock(host, clock); 1000 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) 1001 return; 1002 } 1003 1004 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 1005 1006 if (clock == 0) 1007 goto out; 1008 1009 if (host->version >= SDHCI_SPEC_300) { 1010 /* Version 3.00 divisors must be a multiple of 2. */ 1011 if (host->max_clk <= clock) 1012 div = 1; 1013 else { 1014 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) { 1015 if ((host->max_clk / div) <= clock) 1016 break; 1017 } 1018 } 1019 } else { 1020 /* Version 2.00 divisors must be a power of 2. 
*/ 1021 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) { 1022 if ((host->max_clk / div) <= clock) 1023 break; 1024 } 1025 } 1026 div >>= 1; 1027 1028 clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; 1029 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) 1030 << SDHCI_DIVIDER_HI_SHIFT; 1031 clk |= SDHCI_CLOCK_INT_EN; 1032 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1033 1034 /* Wait max 20 ms */ 1035 timeout = 20; 1036 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) 1037 & SDHCI_CLOCK_INT_STABLE)) { 1038 if (timeout == 0) { 1039 printk(KERN_ERR "%s: Internal clock never " 1040 "stabilised.\n", mmc_hostname(host->mmc)); 1041 sdhci_dumpregs(host); 1042 return; 1043 } 1044 timeout--; 1045 mdelay(1); 1046 } 1047 1048 clk |= SDHCI_CLOCK_CARD_EN; 1049 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1050 1051 out: 1052 host->clock = clock; 1053 } 1054 1055 static void sdhci_set_power(struct sdhci_host *host, unsigned short power) 1056 { 1057 u8 pwr = 0; 1058 1059 if (power != (unsigned short)-1) { 1060 switch (1 << power) { 1061 case MMC_VDD_165_195: 1062 pwr = SDHCI_POWER_180; 1063 break; 1064 case MMC_VDD_29_30: 1065 case MMC_VDD_30_31: 1066 pwr = SDHCI_POWER_300; 1067 break; 1068 case MMC_VDD_32_33: 1069 case MMC_VDD_33_34: 1070 pwr = SDHCI_POWER_330; 1071 break; 1072 default: 1073 BUG(); 1074 } 1075 } 1076 1077 if (host->pwr == pwr) 1078 return; 1079 1080 host->pwr = pwr; 1081 1082 if (pwr == 0) { 1083 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1084 return; 1085 } 1086 1087 /* 1088 * Spec says that we should clear the power reg before setting 1089 * a new value. Some controllers don't seem to like this though. 1090 */ 1091 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 1092 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1093 1094 /* 1095 * At least the Marvell CaFe chip gets confused if we set the voltage 1096 * and set turn on power at the same time, so set the voltage first. 
1097 */ 1098 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) 1099 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1100 1101 pwr |= SDHCI_POWER_ON; 1102 1103 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1104 1105 /* 1106 * Some controllers need an extra 10ms delay of 10ms before they 1107 * can apply clock after applying power 1108 */ 1109 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 1110 mdelay(10); 1111 } 1112 1113 /*****************************************************************************\ 1114 * * 1115 * MMC callbacks * 1116 * * 1117 \*****************************************************************************/ 1118 1119 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1120 { 1121 struct sdhci_host *host; 1122 bool present; 1123 unsigned long flags; 1124 1125 host = mmc_priv(mmc); 1126 1127 spin_lock_irqsave(&host->lock, flags); 1128 1129 WARN_ON(host->mrq != NULL); 1130 1131 #ifndef SDHCI_USE_LEDS_CLASS 1132 sdhci_activate_led(host); 1133 #endif 1134 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) { 1135 if (mrq->stop) { 1136 mrq->data->stop = NULL; 1137 mrq->stop = NULL; 1138 } 1139 } 1140 1141 host->mrq = mrq; 1142 1143 /* If polling, assume that the card is always present. 
 */
	/* Card-detect quirk: present state register can't be trusted, so
	 * optimistically assume a card is there and let the command fail. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		present = true;
	else
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				SDHCI_CARD_PRESENT;

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		/* No medium (or dead controller): fail the request from the
		 * finish tasklet rather than touching the hardware. */
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else
		sdhci_send_command(host, mrq->cmd);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Apply bus settings (clock, power, bus width, timing) requested by the
 * MMC core.  Runs under host->lock with interrupts disabled; all register
 * writes are ordered before unlock via mmiowb().
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host;
	unsigned long flags;
	u8 ctrl;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		goto out;

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	sdhci_set_clock(host, ios->clock);

	/* -1 selects "power off" in sdhci_set_power() */
	if (ios->power_mode == MMC_POWER_OFF)
		sdhci_set_power(host, -1);
	else
		sdhci_set_power(host, ios->vdd);

	/* Optional platform hook: emit the 74 initialization clocks */
	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	/*
	 * If your platform has 8-bit width support but is not a v3 controller,
	 * or if it requires special setup code, you should implement that in
	 * platform_8bit_width().
	 */
	if (host->ops->platform_8bit_width)
		host->ops->platform_8bit_width(host, ios->bus_width);
	else {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		if (ios->bus_width == MMC_BUS_WIDTH_8) {
			ctrl &= ~SDHCI_CTRL_4BITBUS;
			/* 8-bit bus bit only defined for v3 controllers */
			if (host->version >= SDHCI_SPEC_300)
				ctrl |= SDHCI_CTRL_8BITBUS;
		} else {
			if (host->version >= SDHCI_SPEC_300)
				ctrl &= ~SDHCI_CTRL_8BITBUS;
			if (ios->bus_width == MMC_BUS_WIDTH_4)
				ctrl |= SDHCI_CTRL_4BITBUS;
			else
				ctrl &= ~SDHCI_CTRL_4BITBUS;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

out:
	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Report the write-protect state of the card.  Returns non-zero when the
 * card is read-only.  A dead controller reports writable so upper layers
 * don't take additional action.
 */
static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host;
	unsigned long flags;
	int is_readonly;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		/* WRITE_PROTECT bit set means writable, hence the negation */
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

/*
 * Enable or disable delivery of the SDIO card interrupt by masking or
 * unmasking SDHCI_INT_CARD_INT in the controller's interrupt enables.
 */
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		goto out;

	if (enable)
		sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
	else
		sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
out:
	mmiowb();

	spin_unlock_irqrestore(&host->lock, flags);
}

/* Host operations handed to the MMC core via mmc->ops */
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.set_ios	= sdhci_set_ios,
	.get_ro		= sdhci_get_ro,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
};

/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/

/*
 * Card insert/remove tasklet.  If the card vanished mid-transfer, reset
 * the CMD and DATA state machines and fail the outstanding request, then
 * (with the lock dropped) ask the core to rescan after a debounce delay.
 */
static void sdhci_tasklet_card(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		if (host->mrq) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			printk(KERN_ERR "%s: Resetting controller.\n",
				mmc_hostname(host->mmc));

			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}

/*
 * Request-completion tasklet.  Cancels the software timeout, resets the
 * controller on error paths (or when quirks demand it), clears the
 * in-flight request state, and reports completion to the MMC core with
 * the lock dropped (mmc_request_done may re-enter the driver).
 */
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
		(mrq->cmd->error ||
		 (mrq->data && (mrq->data->error ||
		  (mrq->data->stop && mrq->data->stop->error))) ||
		   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
			unsigned int clock;

			/* This is to force an update */
			clock = host->clock;
			host->clock = 0;
			sdhci_set_clock(host, clock);
		}

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_reset(host, SDHCI_RESET_CMD);
		sdhci_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
}

/*
 * Software timeout for a request the hardware never completed.  Fails the
 * in-flight data/command with -ETIMEDOUT and kicks the finish tasklet.
 */
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		printk(KERN_ERR "%s: Timeout waiting for hardware "
			"interrupt.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
/*****************************************************************************\ 1415 * * 1416 * Interrupt handling * 1417 * * 1418 \*****************************************************************************/ 1419 1420 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask) 1421 { 1422 BUG_ON(intmask == 0); 1423 1424 if (!host->cmd) { 1425 printk(KERN_ERR "%s: Got command interrupt 0x%08x even " 1426 "though no command operation was in progress.\n", 1427 mmc_hostname(host->mmc), (unsigned)intmask); 1428 sdhci_dumpregs(host); 1429 return; 1430 } 1431 1432 if (intmask & SDHCI_INT_TIMEOUT) 1433 host->cmd->error = -ETIMEDOUT; 1434 else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT | 1435 SDHCI_INT_INDEX)) 1436 host->cmd->error = -EILSEQ; 1437 1438 if (host->cmd->error) { 1439 tasklet_schedule(&host->finish_tasklet); 1440 return; 1441 } 1442 1443 /* 1444 * The host can send and interrupt when the busy state has 1445 * ended, allowing us to wait without wasting CPU cycles. 1446 * Unfortunately this is overloaded on the "data complete" 1447 * interrupt, so we need to take some care when handling 1448 * it. 1449 * 1450 * Note: The 1.0 specification is a bit ambiguous about this 1451 * feature so there might be some problems with older 1452 * controllers. 
1453 */ 1454 if (host->cmd->flags & MMC_RSP_BUSY) { 1455 if (host->cmd->data) 1456 DBG("Cannot wait for busy signal when also " 1457 "doing a data transfer"); 1458 else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)) 1459 return; 1460 1461 /* The controller does not support the end-of-busy IRQ, 1462 * fall through and take the SDHCI_INT_RESPONSE */ 1463 } 1464 1465 if (intmask & SDHCI_INT_RESPONSE) 1466 sdhci_finish_command(host); 1467 } 1468 1469 #ifdef CONFIG_MMC_DEBUG 1470 static void sdhci_show_adma_error(struct sdhci_host *host) 1471 { 1472 const char *name = mmc_hostname(host->mmc); 1473 u8 *desc = host->adma_desc; 1474 __le32 *dma; 1475 __le16 *len; 1476 u8 attr; 1477 1478 sdhci_dumpregs(host); 1479 1480 while (true) { 1481 dma = (__le32 *)(desc + 4); 1482 len = (__le16 *)(desc + 2); 1483 attr = *desc; 1484 1485 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 1486 name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr); 1487 1488 desc += 8; 1489 1490 if (attr & 2) 1491 break; 1492 } 1493 } 1494 #else 1495 static void sdhci_show_adma_error(struct sdhci_host *host) { } 1496 #endif 1497 1498 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 1499 { 1500 BUG_ON(intmask == 0); 1501 1502 if (!host->data) { 1503 /* 1504 * The "data complete" interrupt is also used to 1505 * indicate that a busy state has ended. See comment 1506 * above in sdhci_cmd_irq(). 
1507 */ 1508 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) { 1509 if (intmask & SDHCI_INT_DATA_END) { 1510 sdhci_finish_command(host); 1511 return; 1512 } 1513 } 1514 1515 printk(KERN_ERR "%s: Got data interrupt 0x%08x even " 1516 "though no data operation was in progress.\n", 1517 mmc_hostname(host->mmc), (unsigned)intmask); 1518 sdhci_dumpregs(host); 1519 1520 return; 1521 } 1522 1523 if (intmask & SDHCI_INT_DATA_TIMEOUT) 1524 host->data->error = -ETIMEDOUT; 1525 else if (intmask & SDHCI_INT_DATA_END_BIT) 1526 host->data->error = -EILSEQ; 1527 else if ((intmask & SDHCI_INT_DATA_CRC) && 1528 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 1529 != MMC_BUS_TEST_R) 1530 host->data->error = -EILSEQ; 1531 else if (intmask & SDHCI_INT_ADMA_ERROR) { 1532 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc)); 1533 sdhci_show_adma_error(host); 1534 host->data->error = -EIO; 1535 } 1536 1537 if (host->data->error) 1538 sdhci_finish_data(host); 1539 else { 1540 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 1541 sdhci_transfer_pio(host); 1542 1543 /* 1544 * We currently don't do anything fancy with DMA 1545 * boundaries, but as we can't disable the feature 1546 * we need to at least restart the transfer. 1547 */ 1548 if (intmask & SDHCI_INT_DMA_END) 1549 sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS), 1550 SDHCI_DMA_ADDRESS); 1551 1552 if (intmask & SDHCI_INT_DATA_END) { 1553 if (host->cmd) { 1554 /* 1555 * Data managed to finish before the 1556 * command completed. Make sure we do 1557 * things in the proper order. 
1558 */ 1559 host->data_early = 1; 1560 } else { 1561 sdhci_finish_data(host); 1562 } 1563 } 1564 } 1565 } 1566 1567 static irqreturn_t sdhci_irq(int irq, void *dev_id) 1568 { 1569 irqreturn_t result; 1570 struct sdhci_host* host = dev_id; 1571 u32 intmask; 1572 int cardint = 0; 1573 1574 spin_lock(&host->lock); 1575 1576 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 1577 1578 if (!intmask || intmask == 0xffffffff) { 1579 result = IRQ_NONE; 1580 goto out; 1581 } 1582 1583 DBG("*** %s got interrupt: 0x%08x\n", 1584 mmc_hostname(host->mmc), intmask); 1585 1586 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 1587 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 1588 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 1589 tasklet_schedule(&host->card_tasklet); 1590 } 1591 1592 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); 1593 1594 if (intmask & SDHCI_INT_CMD_MASK) { 1595 sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, 1596 SDHCI_INT_STATUS); 1597 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); 1598 } 1599 1600 if (intmask & SDHCI_INT_DATA_MASK) { 1601 sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK, 1602 SDHCI_INT_STATUS); 1603 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 1604 } 1605 1606 intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK); 1607 1608 intmask &= ~SDHCI_INT_ERROR; 1609 1610 if (intmask & SDHCI_INT_BUS_POWER) { 1611 printk(KERN_ERR "%s: Card is consuming too much power!\n", 1612 mmc_hostname(host->mmc)); 1613 sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS); 1614 } 1615 1616 intmask &= ~SDHCI_INT_BUS_POWER; 1617 1618 if (intmask & SDHCI_INT_CARD_INT) 1619 cardint = 1; 1620 1621 intmask &= ~SDHCI_INT_CARD_INT; 1622 1623 if (intmask) { 1624 printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n", 1625 mmc_hostname(host->mmc), intmask); 1626 sdhci_dumpregs(host); 1627 1628 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 1629 } 1630 1631 result = IRQ_HANDLED; 1632 1633 mmiowb(); 1634 out: 1635 
spin_unlock(&host->lock); 1636 1637 /* 1638 * We have to delay this as it calls back into the driver. 1639 */ 1640 if (cardint) 1641 mmc_signal_sdio_irq(host->mmc); 1642 1643 return result; 1644 } 1645 1646 /*****************************************************************************\ 1647 * * 1648 * Suspend/resume * 1649 * * 1650 \*****************************************************************************/ 1651 1652 #ifdef CONFIG_PM 1653 1654 int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state) 1655 { 1656 int ret; 1657 1658 sdhci_disable_card_detection(host); 1659 1660 ret = mmc_suspend_host(host->mmc); 1661 if (ret) 1662 return ret; 1663 1664 free_irq(host->irq, host); 1665 1666 if (host->vmmc) 1667 ret = regulator_disable(host->vmmc); 1668 1669 return ret; 1670 } 1671 1672 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 1673 1674 int sdhci_resume_host(struct sdhci_host *host) 1675 { 1676 int ret; 1677 1678 if (host->vmmc) { 1679 int ret = regulator_enable(host->vmmc); 1680 if (ret) 1681 return ret; 1682 } 1683 1684 1685 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 1686 if (host->ops->enable_dma) 1687 host->ops->enable_dma(host); 1688 } 1689 1690 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, 1691 mmc_hostname(host->mmc), host); 1692 if (ret) 1693 return ret; 1694 1695 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); 1696 mmiowb(); 1697 1698 ret = mmc_resume_host(host->mmc); 1699 sdhci_enable_card_detection(host); 1700 1701 return ret; 1702 } 1703 1704 EXPORT_SYMBOL_GPL(sdhci_resume_host); 1705 1706 void sdhci_enable_irq_wakeups(struct sdhci_host *host) 1707 { 1708 u8 val; 1709 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 1710 val |= SDHCI_WAKE_ON_INT; 1711 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 1712 } 1713 1714 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); 1715 1716 #endif /* CONFIG_PM */ 1717 1718 /*****************************************************************************\ 1719 * * 1720 * Device 
allocation/registration * 1721 * * 1722 \*****************************************************************************/ 1723 1724 struct sdhci_host *sdhci_alloc_host(struct device *dev, 1725 size_t priv_size) 1726 { 1727 struct mmc_host *mmc; 1728 struct sdhci_host *host; 1729 1730 WARN_ON(dev == NULL); 1731 1732 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 1733 if (!mmc) 1734 return ERR_PTR(-ENOMEM); 1735 1736 host = mmc_priv(mmc); 1737 host->mmc = mmc; 1738 1739 return host; 1740 } 1741 1742 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 1743 1744 int sdhci_add_host(struct sdhci_host *host) 1745 { 1746 struct mmc_host *mmc; 1747 unsigned int caps, ocr_avail; 1748 int ret; 1749 1750 WARN_ON(host == NULL); 1751 if (host == NULL) 1752 return -EINVAL; 1753 1754 mmc = host->mmc; 1755 1756 if (debug_quirks) 1757 host->quirks = debug_quirks; 1758 1759 sdhci_reset(host, SDHCI_RESET_ALL); 1760 1761 host->version = sdhci_readw(host, SDHCI_HOST_VERSION); 1762 host->version = (host->version & SDHCI_SPEC_VER_MASK) 1763 >> SDHCI_SPEC_VER_SHIFT; 1764 if (host->version > SDHCI_SPEC_300) { 1765 printk(KERN_ERR "%s: Unknown controller version (%d). " 1766 "You may experience problems.\n", mmc_hostname(mmc), 1767 host->version); 1768 } 1769 1770 caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? 
host->caps : 1771 sdhci_readl(host, SDHCI_CAPABILITIES); 1772 1773 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 1774 host->flags |= SDHCI_USE_SDMA; 1775 else if (!(caps & SDHCI_CAN_DO_SDMA)) 1776 DBG("Controller doesn't have SDMA capability\n"); 1777 else 1778 host->flags |= SDHCI_USE_SDMA; 1779 1780 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 1781 (host->flags & SDHCI_USE_SDMA)) { 1782 DBG("Disabling DMA as it is marked broken\n"); 1783 host->flags &= ~SDHCI_USE_SDMA; 1784 } 1785 1786 if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2)) 1787 host->flags |= SDHCI_USE_ADMA; 1788 1789 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 1790 (host->flags & SDHCI_USE_ADMA)) { 1791 DBG("Disabling ADMA as it is marked broken\n"); 1792 host->flags &= ~SDHCI_USE_ADMA; 1793 } 1794 1795 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 1796 if (host->ops->enable_dma) { 1797 if (host->ops->enable_dma(host)) { 1798 printk(KERN_WARNING "%s: No suitable DMA " 1799 "available. Falling back to PIO.\n", 1800 mmc_hostname(mmc)); 1801 host->flags &= 1802 ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 1803 } 1804 } 1805 } 1806 1807 if (host->flags & SDHCI_USE_ADMA) { 1808 /* 1809 * We need to allocate descriptors for all sg entries 1810 * (128) and potentially one alignment transfer for 1811 * each of those entries. 1812 */ 1813 host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL); 1814 host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); 1815 if (!host->adma_desc || !host->align_buffer) { 1816 kfree(host->adma_desc); 1817 kfree(host->align_buffer); 1818 printk(KERN_WARNING "%s: Unable to allocate ADMA " 1819 "buffers. Falling back to standard DMA.\n", 1820 mmc_hostname(mmc)); 1821 host->flags &= ~SDHCI_USE_ADMA; 1822 } 1823 } 1824 1825 /* 1826 * If we use DMA, then it's up to the caller to set the DMA 1827 * mask, but PIO does not need the hw shim so we set a new 1828 * mask here in that case. 
1829 */ 1830 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { 1831 host->dma_mask = DMA_BIT_MASK(64); 1832 mmc_dev(host->mmc)->dma_mask = &host->dma_mask; 1833 } 1834 1835 if (host->version >= SDHCI_SPEC_300) 1836 host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) 1837 >> SDHCI_CLOCK_BASE_SHIFT; 1838 else 1839 host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) 1840 >> SDHCI_CLOCK_BASE_SHIFT; 1841 1842 host->max_clk *= 1000000; 1843 if (host->max_clk == 0 || host->quirks & 1844 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 1845 if (!host->ops->get_max_clock) { 1846 printk(KERN_ERR 1847 "%s: Hardware doesn't specify base clock " 1848 "frequency.\n", mmc_hostname(mmc)); 1849 return -ENODEV; 1850 } 1851 host->max_clk = host->ops->get_max_clock(host); 1852 } 1853 1854 host->timeout_clk = 1855 (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; 1856 if (host->timeout_clk == 0) { 1857 if (host->ops->get_timeout_clock) { 1858 host->timeout_clk = host->ops->get_timeout_clock(host); 1859 } else if (!(host->quirks & 1860 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 1861 printk(KERN_ERR 1862 "%s: Hardware doesn't specify timeout clock " 1863 "frequency.\n", mmc_hostname(mmc)); 1864 return -ENODEV; 1865 } 1866 } 1867 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 1868 host->timeout_clk *= 1000; 1869 1870 /* 1871 * Set host parameters. 1872 */ 1873 mmc->ops = &sdhci_ops; 1874 if (host->ops->get_min_clock) 1875 mmc->f_min = host->ops->get_min_clock(host); 1876 else if (host->version >= SDHCI_SPEC_300) 1877 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 1878 else 1879 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 1880 1881 mmc->f_max = host->max_clk; 1882 mmc->caps |= MMC_CAP_SDIO_IRQ; 1883 1884 /* 1885 * A controller may support 8-bit width, but the board itself 1886 * might not have the pins brought out. 
Boards that support 1887 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 1888 * their platform code before calling sdhci_add_host(), and we 1889 * won't assume 8-bit width for hosts without that CAP. 1890 */ 1891 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 1892 mmc->caps |= MMC_CAP_4_BIT_DATA; 1893 1894 if (caps & SDHCI_CAN_DO_HISPD) 1895 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 1896 1897 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 1898 mmc_card_is_removable(mmc)) 1899 mmc->caps |= MMC_CAP_NEEDS_POLL; 1900 1901 ocr_avail = 0; 1902 if (caps & SDHCI_CAN_VDD_330) 1903 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; 1904 if (caps & SDHCI_CAN_VDD_300) 1905 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; 1906 if (caps & SDHCI_CAN_VDD_180) 1907 ocr_avail |= MMC_VDD_165_195; 1908 1909 mmc->ocr_avail = ocr_avail; 1910 mmc->ocr_avail_sdio = ocr_avail; 1911 if (host->ocr_avail_sdio) 1912 mmc->ocr_avail_sdio &= host->ocr_avail_sdio; 1913 mmc->ocr_avail_sd = ocr_avail; 1914 if (host->ocr_avail_sd) 1915 mmc->ocr_avail_sd &= host->ocr_avail_sd; 1916 else /* normal SD controllers don't support 1.8V */ 1917 mmc->ocr_avail_sd &= ~MMC_VDD_165_195; 1918 mmc->ocr_avail_mmc = ocr_avail; 1919 if (host->ocr_avail_mmc) 1920 mmc->ocr_avail_mmc &= host->ocr_avail_mmc; 1921 1922 if (mmc->ocr_avail == 0) { 1923 printk(KERN_ERR "%s: Hardware doesn't report any " 1924 "support voltages.\n", mmc_hostname(mmc)); 1925 return -ENODEV; 1926 } 1927 1928 spin_lock_init(&host->lock); 1929 1930 /* 1931 * Maximum number of segments. Depends on if the hardware 1932 * can do scatter/gather or not. 1933 */ 1934 if (host->flags & SDHCI_USE_ADMA) 1935 mmc->max_segs = 128; 1936 else if (host->flags & SDHCI_USE_SDMA) 1937 mmc->max_segs = 1; 1938 else /* PIO */ 1939 mmc->max_segs = 128; 1940 1941 /* 1942 * Maximum number of sectors in one transfer. Limited by DMA boundary 1943 * size (512KiB). 
1944 */ 1945 mmc->max_req_size = 524288; 1946 1947 /* 1948 * Maximum segment size. Could be one segment with the maximum number 1949 * of bytes. When doing hardware scatter/gather, each entry cannot 1950 * be larger than 64 KiB though. 1951 */ 1952 if (host->flags & SDHCI_USE_ADMA) { 1953 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) 1954 mmc->max_seg_size = 65535; 1955 else 1956 mmc->max_seg_size = 65536; 1957 } else { 1958 mmc->max_seg_size = mmc->max_req_size; 1959 } 1960 1961 /* 1962 * Maximum block size. This varies from controller to controller and 1963 * is specified in the capabilities register. 1964 */ 1965 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { 1966 mmc->max_blk_size = 2; 1967 } else { 1968 mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> 1969 SDHCI_MAX_BLOCK_SHIFT; 1970 if (mmc->max_blk_size >= 3) { 1971 printk(KERN_WARNING "%s: Invalid maximum block size, " 1972 "assuming 512 bytes\n", mmc_hostname(mmc)); 1973 mmc->max_blk_size = 0; 1974 } 1975 } 1976 1977 mmc->max_blk_size = 512 << mmc->max_blk_size; 1978 1979 /* 1980 * Maximum block count. 1981 */ 1982 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; 1983 1984 /* 1985 * Init tasklets. 
1986 */ 1987 tasklet_init(&host->card_tasklet, 1988 sdhci_tasklet_card, (unsigned long)host); 1989 tasklet_init(&host->finish_tasklet, 1990 sdhci_tasklet_finish, (unsigned long)host); 1991 1992 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host); 1993 1994 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, 1995 mmc_hostname(mmc), host); 1996 if (ret) 1997 goto untasklet; 1998 1999 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); 2000 if (IS_ERR(host->vmmc)) { 2001 printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc)); 2002 host->vmmc = NULL; 2003 } else { 2004 regulator_enable(host->vmmc); 2005 } 2006 2007 sdhci_init(host, 0); 2008 2009 #ifdef CONFIG_MMC_DEBUG 2010 sdhci_dumpregs(host); 2011 #endif 2012 2013 #ifdef SDHCI_USE_LEDS_CLASS 2014 snprintf(host->led_name, sizeof(host->led_name), 2015 "%s::", mmc_hostname(mmc)); 2016 host->led.name = host->led_name; 2017 host->led.brightness = LED_OFF; 2018 host->led.default_trigger = mmc_hostname(mmc); 2019 host->led.brightness_set = sdhci_led_control; 2020 2021 ret = led_classdev_register(mmc_dev(mmc), &host->led); 2022 if (ret) 2023 goto reset; 2024 #endif 2025 2026 mmiowb(); 2027 2028 mmc_add_host(mmc); 2029 2030 printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n", 2031 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), 2032 (host->flags & SDHCI_USE_ADMA) ? "ADMA" : 2033 (host->flags & SDHCI_USE_SDMA) ? 
"DMA" : "PIO"); 2034 2035 sdhci_enable_card_detection(host); 2036 2037 return 0; 2038 2039 #ifdef SDHCI_USE_LEDS_CLASS 2040 reset: 2041 sdhci_reset(host, SDHCI_RESET_ALL); 2042 free_irq(host->irq, host); 2043 #endif 2044 untasklet: 2045 tasklet_kill(&host->card_tasklet); 2046 tasklet_kill(&host->finish_tasklet); 2047 2048 return ret; 2049 } 2050 2051 EXPORT_SYMBOL_GPL(sdhci_add_host); 2052 2053 void sdhci_remove_host(struct sdhci_host *host, int dead) 2054 { 2055 unsigned long flags; 2056 2057 if (dead) { 2058 spin_lock_irqsave(&host->lock, flags); 2059 2060 host->flags |= SDHCI_DEVICE_DEAD; 2061 2062 if (host->mrq) { 2063 printk(KERN_ERR "%s: Controller removed during " 2064 " transfer!\n", mmc_hostname(host->mmc)); 2065 2066 host->mrq->cmd->error = -ENOMEDIUM; 2067 tasklet_schedule(&host->finish_tasklet); 2068 } 2069 2070 spin_unlock_irqrestore(&host->lock, flags); 2071 } 2072 2073 sdhci_disable_card_detection(host); 2074 2075 mmc_remove_host(host->mmc); 2076 2077 #ifdef SDHCI_USE_LEDS_CLASS 2078 led_classdev_unregister(&host->led); 2079 #endif 2080 2081 if (!dead) 2082 sdhci_reset(host, SDHCI_RESET_ALL); 2083 2084 free_irq(host->irq, host); 2085 2086 del_timer_sync(&host->timer); 2087 2088 tasklet_kill(&host->card_tasklet); 2089 tasklet_kill(&host->finish_tasklet); 2090 2091 if (host->vmmc) { 2092 regulator_disable(host->vmmc); 2093 regulator_put(host->vmmc); 2094 } 2095 2096 kfree(host->adma_desc); 2097 kfree(host->align_buffer); 2098 2099 host->adma_desc = NULL; 2100 host->align_buffer = NULL; 2101 } 2102 2103 EXPORT_SYMBOL_GPL(sdhci_remove_host); 2104 2105 void sdhci_free_host(struct sdhci_host *host) 2106 { 2107 mmc_free_host(host->mmc); 2108 } 2109 2110 EXPORT_SYMBOL_GPL(sdhci_free_host); 2111 2112 /*****************************************************************************\ 2113 * * 2114 * Driver init/exit * 2115 * * 2116 \*****************************************************************************/ 2117 2118 static int __init sdhci_drv_init(void) 2119 
{ 2120 printk(KERN_INFO DRIVER_NAME 2121 ": Secure Digital Host Controller Interface driver\n"); 2122 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 2123 2124 return 0; 2125 } 2126 2127 static void __exit sdhci_drv_exit(void) 2128 { 2129 } 2130 2131 module_init(sdhci_drv_init); 2132 module_exit(sdhci_drv_exit); 2133 2134 module_param(debug_quirks, uint, 0444); 2135 2136 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 2137 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 2138 MODULE_LICENSE("GPL"); 2139 2140 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 2141