1 /* 2 * linux/drivers/mmc/core/core.c 3 * 4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved. 5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. 6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 */ 13 #include <linux/module.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/completion.h> 17 #include <linux/device.h> 18 #include <linux/delay.h> 19 #include <linux/pagemap.h> 20 #include <linux/err.h> 21 #include <linux/leds.h> 22 #include <linux/scatterlist.h> 23 #include <linux/log2.h> 24 #include <linux/regulator/consumer.h> 25 26 #include <linux/mmc/card.h> 27 #include <linux/mmc/host.h> 28 #include <linux/mmc/mmc.h> 29 #include <linux/mmc/sd.h> 30 31 #include "core.h" 32 #include "bus.h" 33 #include "host.h" 34 #include "sdio_bus.h" 35 36 #include "mmc_ops.h" 37 #include "sd_ops.h" 38 #include "sdio_ops.h" 39 40 static struct workqueue_struct *workqueue; 41 42 /* 43 * Enabling software CRCs on the data blocks can be a significant (30%) 44 * performance cost, and for other reasons may not always be desired. 45 * So we allow it it to be disabled. 46 */ 47 int use_spi_crc = 1; 48 module_param(use_spi_crc, bool, 0); 49 50 /* 51 * We normally treat cards as removed during suspend if they are not 52 * known to be on a non-removable bus, to avoid the risk of writing 53 * back data to a different card after resume. Allow this to be 54 * overridden if necessary. 
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
        removable,
        "MMC/SD cards are removable and may be removed during suspend");

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
                                     unsigned long delay)
{
        return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
        flush_workqueue(workqueue);
}

/**
 *      mmc_request_done - finish processing an MMC request
 *      @host: MMC host which completed request
 *      @mrq: MMC request which completed
 *
 *      MMC drivers should call this function when they have completed
 *      their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
        struct mmc_command *cmd = mrq->cmd;
        int err = cmd->error;

        /*
         * An SPI "illegal command" response will not go away with
         * retries, so cancel any remaining retries for that case.
         */
        if (err && cmd->retries && mmc_host_is_spi(host)) {
                if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
                        cmd->retries = 0;
        }

        if (err && cmd->retries) {
                /* Resubmit the same request directly to the host driver */
                pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
                        mmc_hostname(host), cmd->opcode, err);

                cmd->retries--;
                cmd->error = 0;
                host->ops->request(host, mrq);
        } else {
                led_trigger_event(host->led, LED_OFF);

                pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
                        mmc_hostname(host), cmd->opcode, err,
                        cmd->resp[0], cmd->resp[1],
                        cmd->resp[2], cmd->resp[3]);

                if (mrq->data) {
                        pr_debug("%s: %d bytes transferred: %d\n",
                                mmc_hostname(host),
                                mrq->data->bytes_xfered, mrq->data->error);
                }

                if (mrq->stop) {
                        pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
                                mmc_hostname(host), mrq->stop->opcode,
                                mrq->stop->error,
                                mrq->stop->resp[0], mrq->stop->resp[1],
                                mrq->stop->resp[2], mrq->stop->resp[3]);
                }

                /* Notify the submitter (e.g. mmc_wait_done()) */
                if (mrq->done)
                        mrq->done(mrq);
        }
}

EXPORT_SYMBOL(mmc_request_done);

/*
 * Internal function. Sanity-check a request (and, with CONFIG_MMC_DEBUG,
 * verify the scatterlist length matches the declared transfer size), then
 * hand it to the host driver. Caller must hold the host claim.
 */
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned int i, sz;
        struct scatterlist *sg;
#endif

        pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
                 mmc_hostname(host), mrq->cmd->opcode,
                 mrq->cmd->arg, mrq->cmd->flags);

        if (mrq->data) {
                pr_debug("%s: blksz %d blocks %d flags %08x "
                        "tsac %d ms nsac %d\n",
                        mmc_hostname(host), mrq->data->blksz,
                        mrq->data->blocks, mrq->data->flags,
                        mrq->data->timeout_ns / 1000000,
                        mrq->data->timeout_clks);
        }

        if (mrq->stop) {
                pr_debug("%s: CMD%u arg %08x flags %08x\n",
                         mmc_hostname(host), mrq->stop->opcode,
                         mrq->stop->arg, mrq->stop->flags);
        }

        WARN_ON(!host->claimed);

        led_trigger_event(host->led, LED_FULL);

        mrq->cmd->error = 0;
        mrq->cmd->mrq = mrq;
        if (mrq->data) {
                /* The request must fit within the host's advertised limits */
                BUG_ON(mrq->data->blksz > host->max_blk_size);
                BUG_ON(mrq->data->blocks > host->max_blk_count);
                BUG_ON(mrq->data->blocks * mrq->data->blksz >
                        host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
                sz = 0;
                for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
                        sz += sg->length;
                BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

                mrq->cmd->data = mrq->data;
                mrq->data->error = 0;
                mrq->data->mrq = mrq;
                if (mrq->stop) {
                        mrq->data->stop = mrq->stop;
                        mrq->stop->error = 0;
                        mrq->stop->mrq = mrq;
                }
        }
        host->ops->request(host, mrq);
}

/* Completion callback used by mmc_wait_for_req() */
static void mmc_wait_done(struct mmc_request *mrq)
{
        complete(mrq->done_data);
}

/**
 *      mmc_wait_for_req - start a request and wait for completion
 *      @host: MMC host to start command
 *      @mrq: MMC request to start
 *
 *      Start a new MMC custom command
 *      request for a host, and wait for the command to complete.
 *      Does not attempt to parse the response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
        DECLARE_COMPLETION_ONSTACK(complete);

        mrq->done_data = &complete;
        mrq->done = mmc_wait_done;

        mmc_start_request(host, mrq);

        /* mmc_wait_done() signals this from mmc_request_done() */
        wait_for_completion(&complete);
}

EXPORT_SYMBOL(mmc_wait_for_req);

/**
 *      mmc_wait_for_cmd - start a command and wait for completion
 *      @host: MMC host to start command
 *      @cmd: MMC command to start
 *      @retries: maximum number of retries
 *
 *      Start a new MMC command for a host, and wait for the command
 *      to complete. Return any error that occurred while the command
 *      was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
        struct mmc_request mrq;

        WARN_ON(!host->claimed);

        memset(&mrq, 0, sizeof(struct mmc_request));

        memset(cmd->resp, 0, sizeof(cmd->resp));
        cmd->retries = retries;

        /* Command-only request: no data phase */
        mrq.cmd = cmd;
        cmd->data = NULL;

        mmc_wait_for_req(host, &mrq);

        return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);

/**
 *      mmc_set_data_timeout - set the timeout for a data command
 *      @data: data phase for command
 *      @card: the MMC card associated with the data transfer
 *
 *      Computes the data timeout parameters according to the
 *      correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
        unsigned int mult;

        /*
         * SDIO cards only define an upper 1 s limit on access.
         */
        if (mmc_card_sdio(card)) {
                data->timeout_ns = 1000000000;
                data->timeout_clks = 0;
                return;
        }

        /*
         * SD cards use a 100 multiplier rather than 10
         */
        mult = mmc_card_sd(card) ? 100 : 10;

        /*
         * Scale up the multiplier (and therefore the timeout) by
         * the r2w factor for writes.
         */
        if (data->flags & MMC_DATA_WRITE)
                mult <<= card->csd.r2w_factor;

        data->timeout_ns = card->csd.tacc_ns * mult;
        data->timeout_clks = card->csd.tacc_clks * mult;

        /*
         * SD cards also have an upper limit on the timeout.
         */
        if (mmc_card_sd(card)) {
                unsigned int timeout_us, limit_us;

                /* Convert the clock-count part of the timeout to us */
                timeout_us = data->timeout_ns / 1000;
                timeout_us += data->timeout_clks * 1000 /
                        (card->host->ios.clock / 1000);

                if (data->flags & MMC_DATA_WRITE)
                        /*
                         * The limit is really 250 ms, but that is
                         * insufficient for some crappy cards.
                         */
                        limit_us = 300000;
                else
                        limit_us = 100000;

                /*
                 * SDHC cards always use these fixed values.
                 */
                if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
                        data->timeout_ns = limit_us * 1000;
                        data->timeout_clks = 0;
                }
        }
        /*
         * Some cards need very high timeouts if driven in SPI mode.
         * The worst observed timeout was 900ms after writing a
         * continuous stream of data until the internal logic
         * overflowed.
         */
        if (mmc_host_is_spi(card->host)) {
                if (data->flags & MMC_DATA_WRITE) {
                        if (data->timeout_ns < 1000000000)
                                data->timeout_ns = 1000000000;  /* 1s */
                } else {
                        if (data->timeout_ns < 100000000)
                                data->timeout_ns = 100000000;   /* 100ms */
                }
        }
}
EXPORT_SYMBOL(mmc_set_data_timeout);

/**
 *      mmc_align_data_size - pads a transfer size to a more optimal value
 *      @card: the MMC card associated with the data transfer
 *      @sz: original transfer size
 *
 *      Pads the original data size with a number of extra bytes in
 *      order to avoid controller bugs and/or performance hits
 *      (e.g. some controllers revert to PIO for certain sizes).
 *
 *      Returns the improved size, which might be unmodified.
 *
 *      Note that this function is only relevant when issuing a
 *      single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
        /*
         * FIXME: We don't have a system for the controller to tell
         * the core about its problems yet, so for now we just 32-bit
         * align the size.
         */
        sz = ((sz + 3) / 4) * 4;

        return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

/**
 *      mmc_host_enable - enable a host.
 *      @host: mmc host to enable
 *
 *      Hosts that support power saving can use the 'enable' and 'disable'
 *      methods to exit and enter power saving states. For more information
 *      see comments for struct mmc_host_ops.
 */
int mmc_host_enable(struct mmc_host *host)
{
        if (!(host->caps & MMC_CAP_DISABLE))
                return 0;

        /* Re-entry from our own enable/disable callback: do nothing */
        if (host->en_dis_recurs)
                return 0;

        /* Only the outermost enable in a nest touches the hardware */
        if (host->nesting_cnt++)
                return 0;

        /* Make sure no lazy disable fires while we are enabled */
        cancel_delayed_work_sync(&host->disable);

        if (host->enabled)
                return 0;

        if (host->ops->enable) {
                int err;

                host->en_dis_recurs = 1;
                err = host->ops->enable(host);
                host->en_dis_recurs = 0;

                if (err) {
                        pr_debug("%s: enable error %d\n",
                                 mmc_hostname(host), err);
                        return err;
                }
        }
        host->enabled = 1;
        return 0;
}
EXPORT_SYMBOL(mmc_host_enable);

/*
 * Internal helper: invoke the host driver's disable callback.
 * A positive return from the callback asks for a deferred (lazy)
 * disable after that many milliseconds.
 */
static int mmc_host_do_disable(struct mmc_host *host, int lazy)
{
        if (host->ops->disable) {
                int err;

                host->en_dis_recurs = 1;
                err = host->ops->disable(host, lazy);
                host->en_dis_recurs = 0;

                if (err < 0) {
                        pr_debug("%s: disable error %d\n",
                                 mmc_hostname(host), err);
                        return err;
                }
                if (err > 0) {
                        unsigned long delay = msecs_to_jiffies(err);

                        mmc_schedule_delayed_work(&host->disable, delay);
                }
        }
        host->enabled = 0;
        return 0;
}

/**
 *      mmc_host_disable - disable a host.
 *      @host: mmc host to disable
 *
 *      Hosts that support power saving can use the 'enable' and 'disable'
 *      methods to exit and enter power saving states. For more information
 *      see comments for struct mmc_host_ops.
 */
int mmc_host_disable(struct mmc_host *host)
{
        int err;

        if (!(host->caps & MMC_CAP_DISABLE))
                return 0;

        /* Re-entry from our own enable/disable callback: do nothing */
        if (host->en_dis_recurs)
                return 0;

        /* Only the outermost disable in a nest touches the hardware */
        if (--host->nesting_cnt)
                return 0;

        if (!host->enabled)
                return 0;

        err = mmc_host_do_disable(host, 0);
        return err;
}
EXPORT_SYMBOL(mmc_host_disable);

/**
 *      __mmc_claim_host - exclusively claim a host
 *      @host: mmc host to claim
 *      @abort: whether or not the operation should be aborted
 *
 *      Claim a host for a set of operations. If @abort is non null and
 *      dereferences to a non-zero value then this will return prematurely
 *      with that non-zero value without acquiring the lock. Returns zero
 *      with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long flags;
        int stop;

        might_sleep();

        add_wait_queue(&host->wq, &wait);
        spin_lock_irqsave(&host->lock, flags);
        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                stop = abort ? atomic_read(abort) : 0;
                /* A claim by the current task is re-entrant */
                if (stop || !host->claimed || host->claimer == current)
                        break;
                spin_unlock_irqrestore(&host->lock, flags);
                schedule();
                spin_lock_irqsave(&host->lock, flags);
        }
        set_current_state(TASK_RUNNING);
        if (!stop) {
                host->claimed = 1;
                host->claimer = current;
                host->claim_cnt += 1;
        } else
                /* Aborted: pass the wakeup on to the next waiter */
                wake_up(&host->wq);
        spin_unlock_irqrestore(&host->lock, flags);
        remove_wait_queue(&host->wq, &wait);
        if (!stop)
                mmc_host_enable(host);
        return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);

/**
 *      mmc_try_claim_host - try exclusively to claim a host
 *      @host: mmc host to claim
 *
 *      Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
        int claimed_host = 0;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        /* Unclaimed, or a nested claim by the current task */
        if (!host->claimed || host->claimer == current) {
                host->claimed = 1;
                host->claimer = current;
                host->claim_cnt += 1;
                claimed_host = 1;
        }
        spin_unlock_irqrestore(&host->lock, flags);
        return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);

/*
 * Internal helper: drop one claim reference and, when the last nested
 * claim is released, wake up any waiters in __mmc_claim_host().
 */
static void mmc_do_release_host(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (--host->claim_cnt) {
                /* Release for nested claim */
                spin_unlock_irqrestore(&host->lock, flags);
        } else {
                host->claimed = 0;
                host->claimer = NULL;
                spin_unlock_irqrestore(&host->lock, flags);
                wake_up(&host->wq);
        }
}

/*
 * Deferred (lazy) disable, run from the MMC workqueue.
 */
void mmc_host_deeper_disable(struct work_struct *work)
{
        struct mmc_host *host =
                container_of(work, struct mmc_host, disable.work);

        /* If the host is claimed then we do not want to disable it anymore */
        if (!mmc_try_claim_host(host))
                return;
        mmc_host_do_disable(host, 1);
        mmc_do_release_host(host);
}

/**
 *      mmc_host_lazy_disable - lazily disable a host.
 *      @host: mmc host to disable
 *
 *      Hosts that support power saving can use the 'enable' and 'disable'
 *      methods to exit and enter power saving states. For more information
 *      see comments for struct mmc_host_ops.
 */
int mmc_host_lazy_disable(struct mmc_host *host)
{
        if (!(host->caps & MMC_CAP_DISABLE))
                return 0;

        /* Re-entry from our own enable/disable callback: do nothing */
        if (host->en_dis_recurs)
                return 0;

        /* Only the outermost disable in a nest touches the hardware */
        if (--host->nesting_cnt)
                return 0;

        if (!host->enabled)
                return 0;

        if (host->disable_delay) {
                /* Defer the actual disable to mmc_host_deeper_disable() */
                mmc_schedule_delayed_work(&host->disable,
                                msecs_to_jiffies(host->disable_delay));
                return 0;
        } else
                return mmc_host_do_disable(host, 1);
}
EXPORT_SYMBOL(mmc_host_lazy_disable);

/**
 *      mmc_release_host - release a host
 *      @host: mmc host to release
 *
 *      Release a MMC host, allowing others to claim the host
 *      for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
        WARN_ON(!host->claimed);

        mmc_host_lazy_disable(host);

        mmc_do_release_host(host);
}

EXPORT_SYMBOL(mmc_release_host);

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
        struct mmc_ios *ios = &host->ios;

        pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
                "width %u timing %u\n",
                 mmc_hostname(host), ios->clock, ios->bus_mode,
                 ios->power_mode, ios->chip_select, ios->vdd,
                 ios->bus_width, ios->timing);

        host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
        host->ios.chip_select = mode;
        mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
631 */ 632 void mmc_set_clock(struct mmc_host *host, unsigned int hz) 633 { 634 WARN_ON(hz < host->f_min); 635 636 if (hz > host->f_max) 637 hz = host->f_max; 638 639 host->ios.clock = hz; 640 mmc_set_ios(host); 641 } 642 643 /* 644 * Change the bus mode (open drain/push-pull) of a host. 645 */ 646 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) 647 { 648 host->ios.bus_mode = mode; 649 mmc_set_ios(host); 650 } 651 652 /* 653 * Change data bus width of a host. 654 */ 655 void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 656 { 657 host->ios.bus_width = width; 658 mmc_set_ios(host); 659 } 660 661 /** 662 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number 663 * @vdd: voltage (mV) 664 * @low_bits: prefer low bits in boundary cases 665 * 666 * This function returns the OCR bit number according to the provided @vdd 667 * value. If conversion is not possible a negative errno value returned. 668 * 669 * Depending on the @low_bits flag the function prefers low or high OCR bits 670 * on boundary voltages. For example, 671 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33); 672 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34); 673 * 674 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21). 675 */ 676 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits) 677 { 678 const int max_bit = ilog2(MMC_VDD_35_36); 679 int bit; 680 681 if (vdd < 1650 || vdd > 3600) 682 return -EINVAL; 683 684 if (vdd >= 1650 && vdd <= 1950) 685 return ilog2(MMC_VDD_165_195); 686 687 if (low_bits) 688 vdd -= 1; 689 690 /* Base 2000 mV, step 100 mV, bit's base 8. 
*/ 691 bit = (vdd - 2000) / 100 + 8; 692 if (bit > max_bit) 693 return max_bit; 694 return bit; 695 } 696 697 /** 698 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask 699 * @vdd_min: minimum voltage value (mV) 700 * @vdd_max: maximum voltage value (mV) 701 * 702 * This function returns the OCR mask bits according to the provided @vdd_min 703 * and @vdd_max values. If conversion is not possible the function returns 0. 704 * 705 * Notes wrt boundary cases: 706 * This function sets the OCR bits for all boundary voltages, for example 707 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 | 708 * MMC_VDD_34_35 mask. 709 */ 710 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max) 711 { 712 u32 mask = 0; 713 714 if (vdd_max < vdd_min) 715 return 0; 716 717 /* Prefer high bits for the boundary vdd_max values. */ 718 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false); 719 if (vdd_max < 0) 720 return 0; 721 722 /* Prefer low bits for the boundary vdd_min values. */ 723 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true); 724 if (vdd_min < 0) 725 return 0; 726 727 /* Fill the mask, from max bit to min bit. */ 728 while (vdd_max >= vdd_min) 729 mask |= 1 << vdd_max--; 730 731 return mask; 732 } 733 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask); 734 735 #ifdef CONFIG_REGULATOR 736 737 /** 738 * mmc_regulator_get_ocrmask - return mask of supported voltages 739 * @supply: regulator to use 740 * 741 * This returns either a negative errno, or a mask of voltages that 742 * can be provided to MMC/SD/SDIO devices using the specified voltage 743 * regulator. This would normally be called before registering the 744 * MMC host adapter. 
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
        int result = 0;
        int count;
        int i;

        count = regulator_count_voltages(supply);
        if (count < 0)
                return count;

        for (i = 0; i < count; i++) {
                int vdd_uV;
                int vdd_mV;

                vdd_uV = regulator_list_voltage(supply, i);
                if (vdd_uV <= 0)
                        continue;

                /* Each selectable voltage contributes its own OCR bit(s) */
                vdd_mV = vdd_uV / 1000;
                result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
        }

        return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit)
{
        int result = 0;
        int min_uV, max_uV;
        int enabled;

        /* Remember the current on/off state so we only toggle when needed */
        enabled = regulator_is_enabled(supply);
        if (enabled < 0)
                return enabled;

        if (vdd_bit) {
                int tmp;
                int voltage;

                /* REVISIT mmc_vddrange_to_ocrmask() may have set some
                 * bits this regulator doesn't quite support ... don't
                 * be too picky, most cards and regulators are OK with
                 * a 0.1V range goof (it's a small error percentage).
                 */
                tmp = vdd_bit - ilog2(MMC_VDD_165_195);
                if (tmp == 0) {
                        min_uV = 1650 * 1000;
                        max_uV = 1950 * 1000;
                } else {
                        min_uV = 1900 * 1000 + tmp * 100 * 1000;
                        max_uV = min_uV + 100 * 1000;
                }

                /* avoid needless changes to this voltage; the regulator
                 * might not allow this operation
                 */
                voltage = regulator_get_voltage(supply);
                if (voltage < 0)
                        result = voltage;
                else if (voltage < min_uV || voltage > max_uV)
                        result = regulator_set_voltage(supply, min_uV, max_uV);
                else
                        result = 0;

                if (result == 0 && !enabled)
                        result = regulator_enable(supply);
        } else if (enabled) {
                result = regulator_disable(supply);
        }

        return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);

#endif /* CONFIG_REGULATOR */

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
        int bit;

        ocr &= host->ocr_avail;

        bit = ffs(ocr);
        if (bit) {
                bit -= 1;

                /* Keep only the lowest set bit and its direct neighbour */
                ocr &= 3 << bit;

                host->ios.vdd = bit;
                mmc_set_ios(host);
        } else {
                pr_warning("%s: host doesn't support card's voltages\n",
                           mmc_hostname(host));
                ocr = 0;
        }

        return ocr;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
        host->ios.timing = timing;
        mmc_set_ios(host);
}

/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stablising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
        int bit;

        /* If ocr is set, we use it */
        if (host->ocr)
                bit = ffs(host->ocr) - 1;
        else
                bit = fls(host->ocr_avail) - 1;

        host->ios.vdd = bit;
        if (mmc_host_is_spi(host)) {
                host->ios.chip_select = MMC_CS_HIGH;
                host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
        } else {
                host->ios.chip_select = MMC_CS_DONTCARE;
                host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
        }
        host->ios.power_mode = MMC_POWER_UP;
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);

        /*
         * This delay should be sufficient to allow the power supply
         * to reach the minimum voltage.
         */
        mmc_delay(10);

        if (host->f_min > 400000) {
                pr_warning("%s: Minimum clock frequency too high for "
                        "identification mode\n", mmc_hostname(host));
                host->ios.clock = host->f_min;
        } else
                host->ios.clock = 400000;

        host->ios.power_mode = MMC_POWER_ON;
        mmc_set_ios(host);

        /*
         * This delay must be at least 74 clock cycles, or 1 ms, or the
         * time required to reach a stable voltage.
         */
        mmc_delay(10);
}

static void mmc_power_off(struct mmc_host *host)
{
        host->ios.clock = 0;
        host->ios.vdd = 0;
        if (!mmc_host_is_spi(host)) {
                host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
                host->ios.chip_select = MMC_CS_DONTCARE;
        }
        host->ios.power_mode = MMC_POWER_OFF;
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
        BUG_ON(!host);
        BUG_ON(host->bus_refs);
        BUG_ON(!host->bus_dead);

        host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->bus_refs++;
        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->bus_refs--;
        if ((host->bus_refs == 0) && host->bus_ops)
                __mmc_release_bus(host);
        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
        unsigned long flags;

        BUG_ON(!host);
        BUG_ON(!ops);

        WARN_ON(!host->claimed);

        spin_lock_irqsave(&host->lock, flags);

        BUG_ON(host->bus_ops);
        BUG_ON(host->bus_refs);

        host->bus_ops = ops;
        host->bus_refs = 1;
        host->bus_dead = 0;

        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.
1008 */ 1009 void mmc_detach_bus(struct mmc_host *host) 1010 { 1011 unsigned long flags; 1012 1013 BUG_ON(!host); 1014 1015 WARN_ON(!host->claimed); 1016 WARN_ON(!host->bus_ops); 1017 1018 spin_lock_irqsave(&host->lock, flags); 1019 1020 host->bus_dead = 1; 1021 1022 spin_unlock_irqrestore(&host->lock, flags); 1023 1024 mmc_power_off(host); 1025 1026 mmc_bus_put(host); 1027 } 1028 1029 /** 1030 * mmc_detect_change - process change of state on a MMC socket 1031 * @host: host which changed state. 1032 * @delay: optional delay to wait before detection (jiffies) 1033 * 1034 * MMC drivers should call this when they detect a card has been 1035 * inserted or removed. The MMC layer will confirm that any 1036 * present card is still functional, and initialize any newly 1037 * inserted. 1038 */ 1039 void mmc_detect_change(struct mmc_host *host, unsigned long delay) 1040 { 1041 #ifdef CONFIG_MMC_DEBUG 1042 unsigned long flags; 1043 spin_lock_irqsave(&host->lock, flags); 1044 WARN_ON(host->removed); 1045 spin_unlock_irqrestore(&host->lock, flags); 1046 #endif 1047 1048 mmc_schedule_delayed_work(&host->detect, delay); 1049 } 1050 1051 EXPORT_SYMBOL(mmc_detect_change); 1052 1053 1054 void mmc_rescan(struct work_struct *work) 1055 { 1056 struct mmc_host *host = 1057 container_of(work, struct mmc_host, detect.work); 1058 u32 ocr; 1059 int err; 1060 1061 mmc_bus_get(host); 1062 1063 /* if there is a card registered, check whether it is still present */ 1064 if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) 1065 host->bus_ops->detect(host); 1066 1067 mmc_bus_put(host); 1068 1069 1070 mmc_bus_get(host); 1071 1072 /* if there still is a card present, stop here */ 1073 if (host->bus_ops != NULL) { 1074 mmc_bus_put(host); 1075 goto out; 1076 } 1077 1078 /* detect a newly inserted card */ 1079 1080 /* 1081 * Only we can add a new handler, so it's safe to 1082 * release the lock here. 
1083 */ 1084 mmc_bus_put(host); 1085 1086 if (host->ops->get_cd && host->ops->get_cd(host) == 0) 1087 goto out; 1088 1089 mmc_claim_host(host); 1090 1091 mmc_power_up(host); 1092 mmc_go_idle(host); 1093 1094 mmc_send_if_cond(host, host->ocr_avail); 1095 1096 /* 1097 * First we search for SDIO... 1098 */ 1099 err = mmc_send_io_op_cond(host, 0, &ocr); 1100 if (!err) { 1101 if (mmc_attach_sdio(host, ocr)) 1102 mmc_power_off(host); 1103 goto out; 1104 } 1105 1106 /* 1107 * ...then normal SD... 1108 */ 1109 err = mmc_send_app_op_cond(host, 0, &ocr); 1110 if (!err) { 1111 if (mmc_attach_sd(host, ocr)) 1112 mmc_power_off(host); 1113 goto out; 1114 } 1115 1116 /* 1117 * ...and finally MMC. 1118 */ 1119 err = mmc_send_op_cond(host, 0, &ocr); 1120 if (!err) { 1121 if (mmc_attach_mmc(host, ocr)) 1122 mmc_power_off(host); 1123 goto out; 1124 } 1125 1126 mmc_release_host(host); 1127 mmc_power_off(host); 1128 1129 out: 1130 if (host->caps & MMC_CAP_NEEDS_POLL) 1131 mmc_schedule_delayed_work(&host->detect, HZ); 1132 } 1133 1134 void mmc_start_host(struct mmc_host *host) 1135 { 1136 mmc_power_off(host); 1137 mmc_detect_change(host, 0); 1138 } 1139 1140 void mmc_stop_host(struct mmc_host *host) 1141 { 1142 #ifdef CONFIG_MMC_DEBUG 1143 unsigned long flags; 1144 spin_lock_irqsave(&host->lock, flags); 1145 host->removed = 1; 1146 spin_unlock_irqrestore(&host->lock, flags); 1147 #endif 1148 1149 if (host->caps & MMC_CAP_DISABLE) 1150 cancel_delayed_work(&host->disable); 1151 cancel_delayed_work(&host->detect); 1152 mmc_flush_scheduled_work(); 1153 1154 mmc_bus_get(host); 1155 if (host->bus_ops && !host->bus_dead) { 1156 if (host->bus_ops->remove) 1157 host->bus_ops->remove(host); 1158 1159 mmc_claim_host(host); 1160 mmc_detach_bus(host); 1161 mmc_release_host(host); 1162 mmc_bus_put(host); 1163 return; 1164 } 1165 mmc_bus_put(host); 1166 1167 BUG_ON(host->card); 1168 1169 mmc_power_off(host); 1170 } 1171 1172 void mmc_power_save_host(struct mmc_host *host) 1173 { 1174 
mmc_bus_get(host); 1175 1176 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 1177 mmc_bus_put(host); 1178 return; 1179 } 1180 1181 if (host->bus_ops->power_save) 1182 host->bus_ops->power_save(host); 1183 1184 mmc_bus_put(host); 1185 1186 mmc_power_off(host); 1187 } 1188 EXPORT_SYMBOL(mmc_power_save_host); 1189 1190 void mmc_power_restore_host(struct mmc_host *host) 1191 { 1192 mmc_bus_get(host); 1193 1194 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 1195 mmc_bus_put(host); 1196 return; 1197 } 1198 1199 mmc_power_up(host); 1200 host->bus_ops->power_restore(host); 1201 1202 mmc_bus_put(host); 1203 } 1204 EXPORT_SYMBOL(mmc_power_restore_host); 1205 1206 int mmc_card_awake(struct mmc_host *host) 1207 { 1208 int err = -ENOSYS; 1209 1210 mmc_bus_get(host); 1211 1212 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) 1213 err = host->bus_ops->awake(host); 1214 1215 mmc_bus_put(host); 1216 1217 return err; 1218 } 1219 EXPORT_SYMBOL(mmc_card_awake); 1220 1221 int mmc_card_sleep(struct mmc_host *host) 1222 { 1223 int err = -ENOSYS; 1224 1225 mmc_bus_get(host); 1226 1227 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) 1228 err = host->bus_ops->sleep(host); 1229 1230 mmc_bus_put(host); 1231 1232 return err; 1233 } 1234 EXPORT_SYMBOL(mmc_card_sleep); 1235 1236 int mmc_card_can_sleep(struct mmc_host *host) 1237 { 1238 struct mmc_card *card = host->card; 1239 1240 if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3) 1241 return 1; 1242 return 0; 1243 } 1244 EXPORT_SYMBOL(mmc_card_can_sleep); 1245 1246 #ifdef CONFIG_PM 1247 1248 /** 1249 * mmc_suspend_host - suspend a host 1250 * @host: mmc host 1251 * @state: suspend mode (PM_SUSPEND_xxx) 1252 */ 1253 int mmc_suspend_host(struct mmc_host *host, pm_message_t state) 1254 { 1255 int err = 0; 1256 1257 if (host->caps & MMC_CAP_DISABLE) 1258 cancel_delayed_work(&host->disable); 1259 cancel_delayed_work(&host->detect); 1260 mmc_flush_scheduled_work(); 
1261 1262 mmc_bus_get(host); 1263 if (host->bus_ops && !host->bus_dead) { 1264 if (host->bus_ops->suspend) 1265 err = host->bus_ops->suspend(host); 1266 if (err == -ENOSYS || !host->bus_ops->resume) { 1267 /* 1268 * We simply "remove" the card in this case. 1269 * It will be redetected on resume. 1270 */ 1271 if (host->bus_ops->remove) 1272 host->bus_ops->remove(host); 1273 mmc_claim_host(host); 1274 mmc_detach_bus(host); 1275 mmc_release_host(host); 1276 err = 0; 1277 } 1278 } 1279 mmc_bus_put(host); 1280 1281 if (!err) 1282 mmc_power_off(host); 1283 1284 return err; 1285 } 1286 1287 EXPORT_SYMBOL(mmc_suspend_host); 1288 1289 /** 1290 * mmc_resume_host - resume a previously suspended host 1291 * @host: mmc host 1292 */ 1293 int mmc_resume_host(struct mmc_host *host) 1294 { 1295 int err = 0; 1296 1297 mmc_bus_get(host); 1298 if (host->bus_ops && !host->bus_dead) { 1299 mmc_power_up(host); 1300 mmc_select_voltage(host, host->ocr); 1301 BUG_ON(!host->bus_ops->resume); 1302 err = host->bus_ops->resume(host); 1303 if (err) { 1304 printk(KERN_WARNING "%s: error %d during resume " 1305 "(card was removed?)\n", 1306 mmc_hostname(host), err); 1307 if (host->bus_ops->remove) 1308 host->bus_ops->remove(host); 1309 mmc_claim_host(host); 1310 mmc_detach_bus(host); 1311 mmc_release_host(host); 1312 /* no need to bother upper layers */ 1313 err = 0; 1314 } 1315 } 1316 mmc_bus_put(host); 1317 1318 /* 1319 * We add a slight delay here so that resume can progress 1320 * in parallel. 
1321 */ 1322 mmc_detect_change(host, 1); 1323 1324 return err; 1325 } 1326 1327 EXPORT_SYMBOL(mmc_resume_host); 1328 1329 #endif 1330 1331 static int __init mmc_init(void) 1332 { 1333 int ret; 1334 1335 workqueue = create_singlethread_workqueue("kmmcd"); 1336 if (!workqueue) 1337 return -ENOMEM; 1338 1339 ret = mmc_register_bus(); 1340 if (ret) 1341 goto destroy_workqueue; 1342 1343 ret = mmc_register_host_class(); 1344 if (ret) 1345 goto unregister_bus; 1346 1347 ret = sdio_register_bus(); 1348 if (ret) 1349 goto unregister_host_class; 1350 1351 return 0; 1352 1353 unregister_host_class: 1354 mmc_unregister_host_class(); 1355 unregister_bus: 1356 mmc_unregister_bus(); 1357 destroy_workqueue: 1358 destroy_workqueue(workqueue); 1359 1360 return ret; 1361 } 1362 1363 static void __exit mmc_exit(void) 1364 { 1365 sdio_unregister_bus(); 1366 mmc_unregister_host_class(); 1367 mmc_unregister_bus(); 1368 destroy_workqueue(workqueue); 1369 } 1370 1371 subsys_initcall(mmc_init); 1372 module_exit(mmc_exit); 1373 1374 MODULE_LICENSE("GPL"); 1375