/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

static unsigned int debug_quirks = 0;

/* For multi controllers in one platform case */
static u16 chip_index = 0;
static spinlock_t index_lock;

/*
 * Different quirks to handle when the hardware deviates from a strict
 * interpretation of the SDHCI specification.
 */

/* Controller doesn't honor resets unless we touch the clock register */
#define SDHCI_QUIRK_CLOCK_BEFORE_RESET			(1<<0)
/* Controller has bad caps bits, but really supports DMA */
#define SDHCI_QUIRK_FORCE_DMA				(1<<1)
/* Controller doesn't like some resets when there is no card inserted. */
#define SDHCI_QUIRK_NO_CARD_NO_RESET			(1<<2)
/* Controller doesn't like clearing the power reg before a change */
#define SDHCI_QUIRK_SINGLE_POWER_WRITE			(1<<3)
/* Controller has flaky internal state so reset it on each ios change */
#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS		(1<<4)
/* Controller has an unusable DMA engine */
#define SDHCI_QUIRK_BROKEN_DMA				(1<<5)
/* Controller can only DMA from 32-bit aligned addresses */
#define SDHCI_QUIRK_32BIT_DMA_ADDR			(1<<6)
/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
#define SDHCI_QUIRK_32BIT_DMA_SIZE			(1<<7)
/* Controller needs to be reset after each request to stay stable */
#define SDHCI_QUIRK_RESET_AFTER_REQUEST			(1<<8)

static const struct pci_device_id pci_ids[] __devinitdata = {
	{
		.vendor		= PCI_VENDOR_ID_RICOH,
		.device		= PCI_DEVICE_ID_RICOH_R5C822,
		.subvendor	= PCI_VENDOR_ID_IBM,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_CLOCK_BEFORE_RESET |
				  SDHCI_QUIRK_FORCE_DMA,
	},

	{
		.vendor		= PCI_VENDOR_ID_RICOH,
		.device		= PCI_DEVICE_ID_RICOH_R5C822,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_FORCE_DMA |
				  SDHCI_QUIRK_NO_CARD_NO_RESET,
	},

	{
		.vendor		= PCI_VENDOR_ID_TI,
		.device		= PCI_DEVICE_ID_TI_XX21_XX11_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_FORCE_DMA,
	},

	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB712_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_SINGLE_POWER_WRITE |
				  SDHCI_QUIRK_BROKEN_DMA,
	},

	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB712_SD_2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_SINGLE_POWER_WRITE |
				  SDHCI_QUIRK_BROKEN_DMA,
	},

	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB714_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_SINGLE_POWER_WRITE |
				  SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
	},

	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB714_SD_2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_SINGLE_POWER_WRITE |
				  SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
	},

	{
		.vendor		= PCI_VENDOR_ID_JMICRON,
		.device		= PCI_DEVICE_ID_JMICRON_JMB38X_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_32BIT_DMA_ADDR |
				  SDHCI_QUIRK_32BIT_DMA_SIZE |
				  SDHCI_QUIRK_RESET_AFTER_REQUEST,
	},

	{	/* Generic SD host controller */
		PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
	},

	{ /* end: all zeroes */ },
};

MODULE_DEVICE_TABLE(pci, pci_ids);

static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
static void sdhci_finish_command(struct sdhci_host *);

static void sdhci_dumpregs(struct sdhci_host *host)
{
	printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");

	printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
		readl(host->ioaddr + SDHCI_DMA_ADDRESS),
		readw(host->ioaddr + SDHCI_HOST_VERSION));
	printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		readw(host->ioaddr + SDHCI_BLOCK_SIZE),
		readw(host->ioaddr + SDHCI_BLOCK_COUNT));
	printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		readl(host->ioaddr + SDHCI_ARGUMENT),
		readw(host->ioaddr + SDHCI_TRANSFER_MODE));
	printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
		readl(host->ioaddr + SDHCI_PRESENT_STATE),
		readb(host->ioaddr + SDHCI_HOST_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
		readb(host->ioaddr + SDHCI_POWER_CONTROL),
		readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
		readb(host->ioaddr + SDHCI_WAKE_UP_CONTROL),
		readw(host->ioaddr + SDHCI_CLOCK_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
		readb(host->ioaddr + SDHCI_TIMEOUT_CONTROL),
		readl(host->ioaddr + SDHCI_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		readl(host->ioaddr + SDHCI_INT_ENABLE),
		readl(host->ioaddr + SDHCI_SIGNAL_ENABLE));
	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		readw(host->ioaddr + SDHCI_ACMD12_ERR),
		readw(host->ioaddr + SDHCI_SLOT_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n",
		readl(host->ioaddr + SDHCI_CAPABILITIES),
		readl(host->ioaddr + SDHCI_MAX_CURRENT));

	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
}

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT))
			return;
	}

	writeb(mask, host->ioaddr + SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL)
		host->clock = 0;

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (readb(host->ioaddr + SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}

static void sdhci_init(struct sdhci_host *host)
{
	u32 intmask;

	sdhci_reset(host, SDHCI_RESET_ALL);

	intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
		SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
		SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
		SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE;

	writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
	writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
}

static void sdhci_activate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
}

static void sdhci_deactivate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
}

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
{
	return sg_virt(host->cur_sg);
}

static inline int sdhci_next_sg(struct sdhci_host* host)
{
	/*
	 * Skip to next SG entry.
	 */
	host->cur_sg++;
	host->num_sg--;

	/*
	 * Any entries left?
	 */
	if (host->num_sg > 0) {
		host->offset = 0;
		host->remain = host->cur_sg->length;
	}

	return host->num_sg;
}

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	int blksize, chunk_remain;
	u32 data;
	char *buffer;
	int size;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk_remain = 0;
	data = 0;

	buffer = sdhci_sg_to_buffer(host) + host->offset;

	while (blksize) {
		if (chunk_remain == 0) {
			data = readl(host->ioaddr + SDHCI_BUFFER);
			chunk_remain = min(blksize, 4);
		}

		size = min(host->remain, chunk_remain);

		chunk_remain -= size;
		blksize -= size;
		host->offset += size;
		host->remain -= size;

		while (size) {
			*buffer = data & 0xFF;
			buffer++;
			data >>= 8;
			size--;
		}

		if (host->remain == 0) {
			if (sdhci_next_sg(host) == 0) {
				BUG_ON(blksize != 0);
				return;
			}
			buffer = sdhci_sg_to_buffer(host);
		}
	}
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	int blksize, chunk_remain;
	u32 data;
	char *buffer;
	int bytes, size;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk_remain = 4;
	data = 0;

	bytes = 0;
	buffer = sdhci_sg_to_buffer(host) + host->offset;

	while (blksize) {
		size = min(host->remain, chunk_remain);

		chunk_remain -= size;
		blksize -= size;
		host->offset += size;
		host->remain -= size;

		while (size) {
			data >>= 8;
			data |= (u32)*buffer << 24;
			buffer++;
			size--;
		}

		if (chunk_remain == 0) {
			writel(data, host->ioaddr + SDHCI_BUFFER);
			chunk_remain = min(blksize, 4);
		}

		if (host->remain == 0) {
			if (sdhci_next_sg(host) == 0) {
				BUG_ON(blksize != 0);
				return;
			}
			buffer = sdhci_sg_to_buffer(host);
		}
	}
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->num_sg == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		if (host->num_sg == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
{
	u8 count;
	unsigned target_timeout, current_timeout;

	WARN_ON(host->data);

	if (data == NULL)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;

	/* timeout in us */
	target_timeout = data->timeout_ns / 1000 +
		data->timeout_clks / host->clock;

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		printk(KERN_WARNING "%s: Too large timeout requested!\n",
			mmc_hostname(host->mmc));
		count = 0xE;
	}

	writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);

	if (host->flags & SDHCI_USE_DMA)
		host->flags |= SDHCI_REQ_USE_DMA;

	if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
		(host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) &&
		((data->blksz * data->blocks) & 0x3))) {
		DBG("Reverting to PIO because of transfer size (%d)\n",
			data->blksz * data->blocks);
		host->flags &= ~SDHCI_REQ_USE_DMA;
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
		(host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		(data->sg->offset & 0x3))) {
		DBG("Reverting to PIO because of bad alignment\n");
		host->flags &= ~SDHCI_REQ_USE_DMA;
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int count;

		count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len,
			(data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
		BUG_ON(count != 1);

		writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS);
	} else {
		host->cur_sg = data->sg;
		host->num_sg = data->sg_len;

		host->offset = 0;
		host->remain = host->cur_sg->length;
	}

	/* We do not handle DMA boundaries, so set it to max (512 KiB) */
	writew(SDHCI_MAKE_BLKSZ(7, data->blksz),
		host->ioaddr + SDHCI_BLOCK_SIZE);
	writew(data->blocks, host->ioaddr + SDHCI_BLOCK_COUNT);
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_data *data)
{
	u16 mode;

	if (data == NULL)
		return;

	WARN_ON(!host->data);

	mode = SDHCI_TRNS_BLK_CNT_EN;
	if (data->blocks > 1)
		mode |= SDHCI_TRNS_MULTI;
	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;
	u16 blocks;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len,
			(data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
	}

	/*
	 * Controller doesn't count down when in single block mode.
	 */
	if (data->blocks == 1)
		blocks = (data->error == 0) ? 0 : 1;
	else
		blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT);
	data->bytes_xfered = data->blksz * (data->blocks - blocks);

	if (!data->error && blocks) {
		printk(KERN_ERR "%s: Controller signalled completion even "
			"though there were blocks left.\n",
			mmc_hostname(host->mmc));
		data->error = -EIO;
	}

	if (data->stop) {
		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	mod_timer(&host->timer, jiffies + 10 * HZ);

	host->cmd = cmd;

	sdhci_prepare_data(host, cmd->data);

	writel(cmd->arg, host->ioaddr + SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd->data);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		printk(KERN_ERR "%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (cmd->data)
		flags |= SDHCI_CMD_DATA;

	writew(SDHCI_MAKE_CMD(cmd->opcode, flags),
		host->ioaddr + SDHCI_COMMAND);
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting.
			 */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = readl(host->ioaddr +
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						readb(host->ioaddr +
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = readl(host->ioaddr + SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	if (host->data && host->data_early)
		sdhci_finish_data(host);

	if (!host->cmd->data)
		tasklet_schedule(&host->finish_tasklet);

	host->cmd = NULL;
}

static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div;
	u16 clk;
	unsigned long timeout;

	if (clock == host->clock)
		return;

	writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		goto out;

	for (div = 1;div < 256;div *= 2) {
		if ((host->max_clk / div) <= clock)
			break;
	}
	div >>= 1;

	clk = div << SDHCI_DIVIDER_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);

	/* Wait max 10 ms */
	timeout = 10;
	while (!((clk = readw(host->ioaddr + SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);

out:
	host->clock = clock;
}

static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr;

	if (host->power == power)
		return;

	if (power == (unsigned short)-1) {
		writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
		goto out;
	}

	/*
	 * Spec says that we should clear the power reg before setting
	 * a new value. Some controllers don't seem to like this though.
	 */
	if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
		writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);

	pwr = SDHCI_POWER_ON;

	switch (1 << power) {
	case MMC_VDD_165_195:
		pwr |= SDHCI_POWER_180;
		break;
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
		pwr |= SDHCI_POWER_300;
		break;
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
		pwr |= SDHCI_POWER_330;
		break;
	default:
		BUG();
	}

	writeb(pwr, host->ioaddr + SDHCI_POWER_CONTROL);

out:
	host->power = power;
}

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

	sdhci_activate_led(host);

	host->mrq = mrq;

	if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else
		sdhci_send_command(host, mrq->cmd);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host;
	unsigned long flags;
	u8 ctrl;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		writel(0, host->ioaddr + SDHCI_SIGNAL_ENABLE);
		sdhci_init(host);
	}

	sdhci_set_clock(host, ios->clock);

	if (ios->power_mode == MMC_POWER_OFF)
		sdhci_set_power(host, -1);
	else
		sdhci_set_power(host, ios->vdd);

	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl |= SDHCI_CTRL_4BITBUS;
	else
		ctrl &= ~SDHCI_CTRL_4BITBUS;

	if (ios->timing == MMC_TIMING_SD_HS)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->chip->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host;
	unsigned long flags;
	int present;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	present = readl(host->ioaddr + SDHCI_PRESENT_STATE);

	spin_unlock_irqrestore(&host->lock, flags);

	return !(present & SDHCI_WRITE_PROTECT);
}

static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host;
	unsigned long flags;
	u32 ier;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	ier = readl(host->ioaddr + SDHCI_INT_ENABLE);

	ier &= ~SDHCI_INT_CARD_INT;
	if (enable)
		ier |= SDHCI_INT_CARD_INT;

	writel(ier, host->ioaddr + SDHCI_INT_ENABLE);
	writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);

	mmiowb();

	spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.set_ios	= sdhci_set_ios,
	.get_ro		= sdhci_get_ro,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
};

/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/

static void sdhci_tasklet_card(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		if (host->mrq) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			printk(KERN_ERR "%s: Resetting controller.\n",
				mmc_hostname(host->mmc));

			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));
}

static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (mrq->cmd->error ||
		(mrq->data && (mrq->data->error ||
		(mrq->data->stop && mrq->data->stop->error))) ||
		(host->chip->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) {

		/* Some controllers need this kick or reset won't work here */
		if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
			unsigned int clock;

			/* This is to force an update */
			clock = host->clock;
			host->clock = 0;
			sdhci_set_clock(host, clock);
		}

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that.
		 */
		sdhci_reset(host, SDHCI_RESET_CMD);
		sdhci_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

	sdhci_deactivate_led(host);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
}

static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		printk(KERN_ERR "%s: Timeout waiting for hardware "
			"interrupt.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/

static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->cmd) {
		printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	if (host->cmd->error)
		tasklet_schedule(&host->finish_tasklet);
	else if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}

static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->data) {
		/*
		 * A data end interrupt is sent together with the response
		 * for the stop command.
		 */
		if (intmask & SDHCI_INT_DATA_END)
			return;

		printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
			"though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
		host->data->error = -EILSEQ;

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 */
		if (intmask & SDHCI_INT_DMA_END)
			writel(readl(host->ioaddr + SDHCI_DMA_ADDRESS),
				host->ioaddr + SDHCI_DMA_ADDRESS);

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}

static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result;
	struct sdhci_host* host = dev_id;
	u32 intmask;
	int cardint = 0;

	spin_lock(&host->lock);

	intmask = readl(host->ioaddr + SDHCI_INT_STATUS);

	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	DBG("*** %s got interrupt: 0x%08x\n", host->slot_descr, intmask);

	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		writel(intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE),
			host->ioaddr + SDHCI_INT_STATUS);
		tasklet_schedule(&host->card_tasklet);
	}

	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);

	if (intmask & SDHCI_INT_CMD_MASK) {
		writel(intmask & SDHCI_INT_CMD_MASK,
			host->ioaddr + SDHCI_INT_STATUS);
		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
	}

	if (intmask & SDHCI_INT_DATA_MASK) {
		writel(intmask & SDHCI_INT_DATA_MASK,
			host->ioaddr + SDHCI_INT_STATUS);
		sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
	}

	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);

	intmask &= ~SDHCI_INT_ERROR;

	if (intmask & SDHCI_INT_BUS_POWER) {
		printk(KERN_ERR "%s: Card is consuming too much power!\n",
			mmc_hostname(host->mmc));
		writel(SDHCI_INT_BUS_POWER, host->ioaddr + SDHCI_INT_STATUS);
	}

	intmask &= ~SDHCI_INT_BUS_POWER;

	if (intmask & SDHCI_INT_CARD_INT)
		cardint = 1;

	intmask &= ~SDHCI_INT_CARD_INT;

	if (intmask) {
		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
			mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);

		writel(intmask, host->ioaddr + SDHCI_INT_STATUS);
	}

	result = IRQ_HANDLED;

	mmiowb();
out:
	spin_unlock(&host->lock);

	/*
	 * We have to delay this as it calls back into the driver.
	 */
	if (cardint)
		mmc_signal_sdio_irq(host->mmc);

	return result;
}

/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM

static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct sdhci_chip *chip;
	int i, ret;

	chip = pci_get_drvdata(pdev);
	if (!chip)
		return 0;

	DBG("Suspending...\n");

	for (i = 0;i < chip->num_slots;i++) {
		if (!chip->hosts[i])
			continue;
		ret = mmc_suspend_host(chip->hosts[i]->mmc, state);
		if (ret) {
			for (i--;i >= 0;i--)
				mmc_resume_host(chip->hosts[i]->mmc);
			return ret;
		}
	}

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);

	for (i = 0;i < chip->num_slots;i++) {
		if (!chip->hosts[i])
			continue;
		free_irq(chip->hosts[i]->irq, chip->hosts[i]);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int sdhci_resume (struct pci_dev *pdev)
{
	struct sdhci_chip *chip;
	int i, ret;

	chip = pci_get_drvdata(pdev);
	if (!chip)
		return 0;

	DBG("Resuming...\n");

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	for (i = 0;i < chip->num_slots;i++) {
		if (!chip->hosts[i])
			continue;
		if (chip->hosts[i]->flags & SDHCI_USE_DMA)
			pci_set_master(pdev);
		ret = request_irq(chip->hosts[i]->irq, sdhci_irq,
			IRQF_SHARED, chip->hosts[i]->slot_descr,
			chip->hosts[i]);
		if (ret)
			return ret;
		sdhci_init(chip->hosts[i]);
		mmiowb();
		ret = mmc_resume_host(chip->hosts[i]->mmc);
		if (ret)
			return ret;
	}

	return 0;
}

#else /* CONFIG_PM */

#define sdhci_suspend NULL
#define sdhci_resume NULL

#endif /* CONFIG_PM */

/*****************************************************************************\
 *                                                                           *
 * Device probing/removal                                                    *
 *                                                                           *
\*****************************************************************************/

static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
{
	int ret;
	unsigned int version;
	struct sdhci_chip *chip;
	struct mmc_host *mmc;
	struct sdhci_host *host;

	u8 first_bar;
	unsigned int caps;

	chip = pci_get_drvdata(pdev);
	BUG_ON(!chip);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
	if (ret)
		return ret;

	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;

	if (first_bar > 5) {
		printk(KERN_ERR DRIVER_NAME ": Invalid first BAR. Aborting.\n");
		return -ENODEV;
	}

	if (!(pci_resource_flags(pdev, first_bar + slot) & IORESOURCE_MEM)) {
		printk(KERN_ERR DRIVER_NAME ": BAR is not iomem. Aborting.\n");
		return -ENODEV;
	}

	if (pci_resource_len(pdev, first_bar + slot) != 0x100) {
		printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. "
			"You may experience problems.\n");
	}

	if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
		printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. "
			"Aborting.\n");
		return -ENODEV;
	}

	if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
		printk(KERN_ERR DRIVER_NAME ": Unknown interface. Aborting.\n");
		return -ENODEV;
	}

	mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->chip = chip;
	chip->hosts[slot] = host;

	host->bar = first_bar + slot;

	host->addr = pci_resource_start(pdev, host->bar);
	host->irq = pdev->irq;

	DBG("slot %d at 0x%08lx, irq %d\n", slot, host->addr, host->irq);

	snprintf(host->slot_descr, 20, "sdhc%d:slot%d", chip->index, slot);

	ret = pci_request_region(pdev, host->bar, host->slot_descr);
	if (ret)
		goto free;

	host->ioaddr = ioremap_nocache(host->addr,
		pci_resource_len(pdev, host->bar));
	if (!host->ioaddr) {
		ret = -ENOMEM;
		goto release;
	}

	sdhci_reset(host, SDHCI_RESET_ALL);

	version = readw(host->ioaddr + SDHCI_HOST_VERSION);
	version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
	if (version > 1) {
		printk(KERN_ERR "%s: Unknown controller version (%d). "
			"You may experience problems.\n", host->slot_descr,
			version);
	}

	caps = readl(host->ioaddr + SDHCI_CAPABILITIES);

	if (chip->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_DMA;
	else if (!(caps & SDHCI_CAN_DO_DMA))
		DBG("Controller doesn't have DMA capability\n");
	else
		host->flags |= SDHCI_USE_DMA;

	if ((chip->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_DMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_DMA;
	}

	if (((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
		(host->flags & SDHCI_USE_DMA)) {
		printk(KERN_WARNING "%s: Will use DMA "
			"mode even though HW doesn't fully "
			"claim to support it.\n", host->slot_descr);
	}

	if (host->flags & SDHCI_USE_DMA) {
		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
			printk(KERN_WARNING "%s: No suitable DMA available. "
				"Falling back to PIO.\n", host->slot_descr);
			host->flags &= ~SDHCI_USE_DMA;
		}
	}

	if (host->flags & SDHCI_USE_DMA)
		pci_set_master(pdev);
	else /* XXX: Hack to get MMC layer to avoid highmem */
		pdev->dma_mask = 0;

	host->max_clk =
		(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
	if (host->max_clk == 0) {
		printk(KERN_ERR "%s: Hardware doesn't specify base clock "
			"frequency.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}
	host->max_clk *= 1000000;

	host->timeout_clk =
		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
	if (host->timeout_clk == 0) {
		printk(KERN_ERR "%s: Hardware doesn't specify timeout clock "
			"frequency.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}
	if (caps & SDHCI_TIMEOUT_CLK_UNIT)
		host->timeout_clk *= 1000;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	mmc->f_min = host->max_clk / 256;
	mmc->f_max = host->max_clk;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_SDIO_IRQ;

	if (caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	mmc->ocr_avail = 0;
	if (caps & SDHCI_CAN_VDD_330)
		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		mmc->ocr_avail |= MMC_VDD_165_195;

	if (mmc->ocr_avail == 0) {
		printk(KERN_ERR "%s: Hardware doesn't report any "
			"support voltages.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Hardware cannot do scatter lists.
	 */
	if (host->flags & SDHCI_USE_DMA)
		mmc->max_hw_segs = 1;
	else
		mmc->max_hw_segs = 16;
	mmc->max_phys_segs = 16;

	/*
	 * Maximum number of sectors in one transfer. Limited by DMA boundary
	 * size (512KiB).
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
	if (mmc->max_blk_size >= 3) {
		printk(KERN_WARNING "%s: Invalid maximum block size, assuming 512\n",
			host->slot_descr);
		mmc->max_blk_size = 512;
	} else
		mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->card_tasklet,
		sdhci_tasklet_card, (unsigned long)host);
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
		host->slot_descr, host);
	if (ret)
		goto untasklet;

	sdhci_init(host);

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

	mmiowb();

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: SDHCI at 0x%08lx irq %d %s\n", mmc_hostname(mmc),
		host->addr, host->irq,
		(host->flags & SDHCI_USE_DMA)?"DMA":"PIO");

	return 0;

untasklet:
	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);
unmap:
	iounmap(host->ioaddr);
release:
	pci_release_region(pdev, host->bar);
free:
	mmc_free_host(mmc);

	return ret;
}

static void sdhci_remove_slot(struct pci_dev *pdev, int slot)
{
	struct sdhci_chip *chip;
	struct mmc_host *mmc;
	struct sdhci_host *host;

	chip = pci_get_drvdata(pdev);
	host = chip->hosts[slot];
	mmc = host->mmc;

	chip->hosts[slot] = NULL;

	mmc_remove_host(mmc);

	sdhci_reset(host, SDHCI_RESET_ALL);

	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	iounmap(host->ioaddr);

	pci_release_region(pdev, host->bar);

	mmc_free_host(mmc);
}

static int __devinit sdhci_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	int ret, i;
	u8 slots, rev;
	struct sdhci_chip *chip;

	BUG_ON(pdev == NULL);
	BUG_ON(ent == NULL);

	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);

	printk(KERN_INFO DRIVER_NAME
		": SDHCI controller found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pdev), (int)pdev->vendor, (int)pdev->device,
		(int)rev);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
	if (ret)
		return ret;

	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
	DBG("found %d slot(s)\n", slots);
	if (slots == 0)
		return -ENODEV;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	chip = kzalloc(sizeof(struct sdhci_chip) +
		sizeof(struct sdhci_host*) * slots, GFP_KERNEL);
	if (!chip) {
		ret = -ENOMEM;
		goto err;
	}

	chip->pdev = pdev;
	chip->quirks = ent->driver_data;

	if (debug_quirks)
		chip->quirks = debug_quirks;

	chip->num_slots = slots;
	pci_set_drvdata(pdev, chip);

	/* Add for multi controller case */
	spin_lock(&index_lock);
	chip->index = chip_index++;
	spin_unlock(&index_lock);

	for (i = 0;i < slots;i++) {
		ret = sdhci_probe_slot(pdev, i);
		if (ret) {
			for (i--;i >= 0;i--)
				sdhci_remove_slot(pdev, i);
			goto free;
		}
	}

	return 0;

free:
	pci_set_drvdata(pdev, NULL);
	kfree(chip);

err:
	pci_disable_device(pdev);
	return ret;
}

static void __devexit sdhci_remove(struct pci_dev *pdev)
{
	int i;
	struct sdhci_chip *chip;

	chip = pci_get_drvdata(pdev);

	if (chip) {
		for (i = 0;i < chip->num_slots;i++)
			sdhci_remove_slot(pdev, i);

		pci_set_drvdata(pdev, NULL);

		kfree(chip);
	}

	pci_disable_device(pdev);
}

static struct pci_driver sdhci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= pci_ids,
	.probe		= sdhci_probe,
	.remove		= __devexit_p(sdhci_remove),
	.suspend	= sdhci_suspend,
	.resume		= sdhci_resume,
};

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	printk(KERN_INFO DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	spin_lock_init(&index_lock);

	return pci_register_driver(&sdhci_driver);
}

static void __exit sdhci_drv_exit(void)
{
	DBG("Exiting\n");

	pci_unregister_driver(&sdhci_driver);
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");