/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume. Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
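/*
 * For example, a system that must keep the card bound across suspend can
 * boot with "mmc_core.removable=0" on the kernel command line, or (since
 * the parameter is 0644) toggle it at run time via
 * /sys/module/mmc_core/parameters/removable.
 */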
/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries) {
		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			mmc_hostname(host), cmd->opcode, err);

		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	} else {
		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);
	}
}

EXPORT_SYMBOL(mmc_request_done);
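/*
 * Illustrative sketch (not part of this file): a host controller driver
 * would typically call mmc_request_done() from its interrupt handler once
 * its command and data engines have gone idle. "struct myhost" and
 * myhost_decode_status() are hypothetical names.
 *
 *	static irqreturn_t myhost_irq(int irq, void *dev_id)
 *	{
 *		struct myhost *h = dev_id;
 *		struct mmc_request *mrq = h->mrq;
 *
 *		h->mrq = NULL;
 *		mrq->cmd->error = myhost_decode_status(h);
 *		mmc_request_done(h->mmc, mrq);
 *		return IRQ_HANDLED;
 *	}
 */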
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	led_trigger_event(host->led, LED_FULL);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	host->ops->request(host, mrq);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(mrq->done_data);
}

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	DECLARE_COMPLETION_ONSTACK(complete);

	mrq->done_data = &complete;
	mrq->done = mmc_wait_done;

	mmc_start_request(host, mrq);

	wait_for_completion(&complete);
}

EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq;

	WARN_ON(!host->claimed);

	memset(&mrq, 0, sizeof(struct mmc_request));

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
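/*
 * Illustrative sketch: issuing a single command with mmc_wait_for_cmd(),
 * allowing up to three retries. The caller must already hold the host
 * claim, and 'card' is assumed valid; error handling is elided.
 *
 *	struct mmc_command cmd;
 *	int err;
 *
 *	memset(&cmd, 0, sizeof(struct mmc_command));
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 */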
/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		timeout_us += data->timeout_clks * 1000 /
			(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The limit is really 250 ms, but that is
			 * insufficient for some crappy cards.
			 */
			limit_us = 300000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}
	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);

/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
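/*
 * Illustrative sketch: how a card driver might assemble a single-block
 * read, calling mmc_set_data_timeout() before submitting the request.
 * 'card', 'sg', 'sg_len' and 'blk_addr' are assumed to be set up by the
 * caller, and the host must already be claimed.
 *
 *	struct mmc_request mrq;
 *	struct mmc_command cmd;
 *	struct mmc_data data;
 *
 *	memset(&mrq, 0, sizeof(mrq));
 *	memset(&cmd, 0, sizeof(cmd));
 *	memset(&data, 0, sizeof(data));
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = sg;
 *	data.sg_len = sg_len;
 *	mmc_set_data_timeout(&data, card);
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mmc_wait_for_req(card->host, &mrq);
 */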
/**
 * mmc_host_enable - enable a host.
 * @host: mmc host to enable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_enable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (host->nesting_cnt++)
		return 0;

	cancel_delayed_work_sync(&host->disable);

	if (host->enabled)
		return 0;

	if (host->ops->enable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->enable(host);
		host->en_dis_recurs = 0;

		if (err) {
			pr_debug("%s: enable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
	}
	host->enabled = 1;
	return 0;
}
EXPORT_SYMBOL(mmc_host_enable);

static int mmc_host_do_disable(struct mmc_host *host, int lazy)
{
	if (host->ops->disable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->disable(host, lazy);
		host->en_dis_recurs = 0;

		if (err < 0) {
			pr_debug("%s: disable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
		if (err > 0) {
			unsigned long delay = msecs_to_jiffies(err);

			mmc_schedule_delayed_work(&host->disable, delay);
		}
	}
	host->enabled = 0;
	return 0;
}

/**
 * mmc_host_disable - disable a host.
 * @host: mmc host to disable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_disable(struct mmc_host *host)
{
	int err;

	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	err = mmc_host_do_disable(host, 0);
	return err;
}
EXPORT_SYMBOL(mmc_host_disable);

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-null and
 * dereferences to a non-zero value, then this will return prematurely
 * with that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (!stop)
		mmc_host_enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_try_claim_host - try exclusively to claim a host
 * @host: mmc host to claim
 *
 * Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);

static void mmc_do_release_host(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}

void mmc_host_deeper_disable(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, disable.work);

	/* If the host is claimed then we do not want to disable it anymore */
	if (!mmc_try_claim_host(host))
		return;
	mmc_host_do_disable(host, 1);
	mmc_do_release_host(host);
}

/**
 * mmc_host_lazy_disable - lazily disable a host.
 * @host: mmc host to disable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_lazy_disable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	if (host->disable_delay) {
		mmc_schedule_delayed_work(&host->disable,
				msecs_to_jiffies(host->disable_delay));
		return 0;
	} else
		return mmc_host_do_disable(host, 1);
}
EXPORT_SYMBOL(mmc_host_lazy_disable);
/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release an MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	WARN_ON(!host->claimed);

	mmc_host_lazy_disable(host);

	mmc_do_release_host(host);
}

EXPORT_SYMBOL(mmc_release_host);
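/*
 * Illustrative sketch: the usual claim/release pattern around a group of
 * commands, as used elsewhere in this file. mmc_claim_host() is the inline
 * wrapper around __mmc_claim_host() declared in <linux/mmc/core.h>.
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	mmc_release_host(card->host);
 */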
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width and DDR mode of a host.
 */
void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
			   unsigned int ddr)
{
	host->ios.bus_width = width;
	host->ios.ddr = ddr;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_set_bus_width_ddr(host, width, MMC_SDR_MODE);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
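/*
 * Illustrative sketch: a host driver with a fixed 3.3V-3.4V supply could
 * derive its OCR mask at probe time. Per the boundary note above, this
 * yields MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35.
 *
 *	mmc->ocr_avail = mmc_vddrange_to_ocrmask(3300, 3400);
 */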
#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);
		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);

#endif /* CONFIG_REGULATOR */
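/*
 * Illustrative sketch: pairing the two regulator helpers in a host driver.
 * The "vmmc" supply name is conventional but driver-specific; error
 * handling is elided.
 *
 *	At probe time:
 *		supply = regulator_get(&pdev->dev, "vmmc");
 *		mmc->ocr_avail = mmc_regulator_get_ocrmask(supply);
 *
 *	In the set_ios() method:
 *		mmc_regulator_set_ocr(mmc, supply, ios->vdd);
 *
 *	And on power off:
 *		mmc_regulator_set_ocr(mmc, supply, 0);
 */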
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		host->ios.vdd = bit;
		mmc_set_ios(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}

/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
}

static void mmc_power_off(struct mmc_host *host)
{
	host->ios.clock = 0;
	host->ios.vdd = 0;
	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_power_off(host);

	mmc_bus_put(host);
}
/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
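/*
 * Illustrative sketch: a card-detect GPIO interrupt handler in a host
 * driver. The 200 ms debounce delay is a typical choice, not a
 * requirement; "myhost_cd_irq" is a hypothetical name.
 *
 *	static irqreturn_t myhost_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */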
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card. That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time. For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value. For modern cards it
	 * will end up being 4MiB. Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}

static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
				      struct mmc_command *cmd,
				      unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target. The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (card->host->ios.clock / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	cmd->erase_timeout = erase_timeout;
}

static void mmc_set_sd_erase_timeout(struct mmc_card *card,
				     struct mmc_command *cmd, unsigned int arg,
				     unsigned int qty)
{
	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		cmd->erase_timeout = card->ssr.erase_timeout * qty +
				     card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		cmd->erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (cmd->erase_timeout < 1000)
		cmd->erase_timeout = 1000;
}

static void mmc_set_erase_timeout(struct mmc_card *card,
				  struct mmc_command *cmd, unsigned int arg,
				  unsigned int qty)
{
	if (mmc_card_sd(card))
		mmc_set_sd_erase_timeout(card, cmd, arg, qty);
	else
		mmc_set_mmc_erase_timeout(card, cmd, arg, qty);
}
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd;
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2. For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case. SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands. Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EINVAL;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EINVAL;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	mmc_set_erase_timeout(card, &cmd, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			printk(KERN_ERR "error %d requesting status %#x\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == 7);
out:
	return err;
}
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd;

	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);
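/*
 * Illustrative sketch: discarding a sector range from a card driver,
 * preferring a trim when the card supports it. The host must be claimed
 * around the call, as the mmc_erase() documentation requires.
 *
 *	unsigned int arg = mmc_can_trim(card) ? MMC_TRIM_ARG : MMC_ERASE_ARG;
 *	int err;
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_erase(card, from, nr, arg);
 *	mmc_release_host(card->host);
 */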
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	unsigned long flags;
	int i;
	const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

	spin_lock_irqsave(&host->lock, flags);

	if (host->rescan_disable) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
		host->bus_ops->detect(host);

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		mmc_claim_host(host);

		if (freqs[i] >= host->f_min)
			host->f_init = freqs[i];
		else if (!i || freqs[i-1] > host->f_min)
			host->f_init = host->f_min;
		else {
			mmc_release_host(host);
			goto out;
		}
#ifdef CONFIG_MMC_DEBUG
		pr_info("%s: %s: trying to init card at %u Hz\n",
			mmc_hostname(host), __func__, host->f_init);
#endif
		mmc_power_up(host);
		sdio_reset(host);
		mmc_go_idle(host);

		mmc_send_if_cond(host, host->ocr_avail);

		/*
		 * First we search for SDIO...
		 */
		err = mmc_send_io_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_sdio(host, ocr)) {
				mmc_claim_host(host);
				/*
				 * Try SDMEM (but not MMC) even if SDIO
				 * is broken.
				 */
				if (mmc_send_app_op_cond(host, 0, &ocr))
					goto out_fail;

				if (mmc_attach_sd(host, ocr))
					mmc_power_off(host);
			}
			goto out;
		}

		/*
		 * ...then normal SD...
		 */
		err = mmc_send_app_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_sd(host, ocr))
				mmc_power_off(host);
			goto out;
		}

		/*
		 * ...and finally MMC.
		 */
		err = mmc_send_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_mmc(host, ocr))
				mmc_power_off(host);
			goto out;
		}

out_fail:
		mmc_release_host(host);
		mmc_power_off(host);
	}
out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);
int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);

int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);

#ifdef CONFIG_PM

/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend)
			err = host->bus_ops->suspend(host);
		if (err == -ENOSYS || !host->bus_ops->resume) {
			/*
			 * We simply "remove" the card in this case.
			 * It will be redetected on resume.
			 */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
			host->pm_flags = 0;
			err = 0;
		}
	}
	mmc_bus_put(host);

	if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER))
		mmc_power_off(host);

	return err;
}

EXPORT_SYMBOL(mmc_suspend_host);

/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			printk(KERN_WARNING "%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			err = 0;
		}
	}
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);
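/*
 * Illustrative sketch: a legacy platform host driver forwarding system PM
 * events to the core. "struct myhost" and its fields are hypothetical;
 * error handling is elided.
 *
 *	static int myhost_suspend(struct platform_device *pdev,
 *				  pm_message_t state)
 *	{
 *		struct myhost *h = platform_get_drvdata(pdev);
 *
 *		return h->mmc ? mmc_suspend_host(h->mmc) : 0;
 *	}
 *
 *	static int myhost_resume(struct platform_device *pdev)
 *	{
 *		struct myhost *h = platform_get_drvdata(pdev);
 *
 *		return h->mmc ? mmc_resume_host(h->mmc) : 0;
 *	}
 */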
/*
 * Do the card removal on suspend if card is assumed removable. Do that in
 * pm notifier while userspace isn't yet frozen, so we will be able to sync
 * the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		mmc_claim_host(host);

		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_detach_bus(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);

	}

	return 0;
}
#endif

static int __init mmc_init(void)
{
	int ret;

	workqueue = create_singlethread_workqueue("kmmcd");
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");