/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
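 *
 * A minimal sketch of how a hypothetical host driver would call this
 * from its interrupt handler once the transfer has finished; the
 * foo_* names are illustrative only:
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_host *foo = dev_id;
 *
 *		if (foo_transfer_finished(foo))
 *			mmc_request_done(foo->mmc, foo->mrq);
 *		return IRQ_HANDLED;
 *	}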
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries) {
		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			mmc_hostname(host), cmd->opcode, err);

		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	} else {
		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_gate(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);

static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	led_trigger_event(host->led, LED_FULL);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_ungate(host);
	host->ops->request(host, mrq);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(mrq->done_data);
}

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
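 *
 * A sketch of a caller issuing a single-block read this way; error
 * handling is elided and "sector", "buf", "card" and "host" are
 * assumed to exist in the caller:
 *
 *	struct mmc_request mrq;
 *	struct mmc_command cmd;
 *	struct mmc_data data;
 *	struct scatterlist sg;
 *
 *	memset(&mrq, 0, sizeof(mrq));
 *	memset(&cmd, 0, sizeof(cmd));
 *	memset(&data, 0, sizeof(data));
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = sector;
 *	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *	sg_init_one(&sg, buf, 512);
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	mmc_set_data_timeout(&data, card);
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mmc_wait_for_req(host, &mrq);
 *	err = cmd.error ? cmd.error : data.error;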
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	DECLARE_COMPLETION_ONSTACK(complete);

	mrq->done_data = &complete;
	mrq->done = mmc_wait_done;

	mmc_start_request(host, mrq);

	wait_for_completion(&complete);
}

EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq;

	WARN_ON(!host->claimed);

	memset(&mrq, 0, sizeof(struct mmc_request));

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);

/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The limit is really 250 ms, but that is
			 * insufficient for some crappy cards.
			 */
			limit_us = 300000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}
	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);

/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

/**
 * mmc_host_enable - enable a host.
 * @host: mmc host to enable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_enable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (host->nesting_cnt++)
		return 0;

	cancel_delayed_work_sync(&host->disable);

	if (host->enabled)
		return 0;

	if (host->ops->enable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->enable(host);
		host->en_dis_recurs = 0;

		if (err) {
			pr_debug("%s: enable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
	}
	host->enabled = 1;
	return 0;
}
EXPORT_SYMBOL(mmc_host_enable);

static int mmc_host_do_disable(struct mmc_host *host, int lazy)
{
	if (host->ops->disable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->disable(host, lazy);
		host->en_dis_recurs = 0;

		if (err < 0) {
			pr_debug("%s: disable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
		if (err > 0) {
			unsigned long delay = msecs_to_jiffies(err);

			mmc_schedule_delayed_work(&host->disable, delay);
		}
	}
	host->enabled = 0;
	return 0;
}

/**
 * mmc_host_disable - disable a host.
 * @host: mmc host to disable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_disable(struct mmc_host *host)
{
	int err;

	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	err = mmc_host_do_disable(host, 0);
	return err;
}
EXPORT_SYMBOL(mmc_host_disable);

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations.
 * If @abort is non null and
 * dereferences a non-zero value then this will return prematurely with
 * that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (!stop)
		mmc_host_enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_try_claim_host - try exclusively to claim a host
 * @host: mmc host to claim
 *
 * Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);

static void mmc_do_release_host(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}

void mmc_host_deeper_disable(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, disable.work);

	/* If the host is claimed then we do not want to disable it anymore */
	if (!mmc_try_claim_host(host))
		return;
	mmc_host_do_disable(host, 1);
	mmc_do_release_host(host);
}

/**
 * mmc_host_lazy_disable - lazily disable a host.
 * @host: mmc host to disable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_lazy_disable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	if (host->disable_delay) {
		mmc_schedule_delayed_work(&host->disable,
				msecs_to_jiffies(host->disable_delay));
		return 0;
	} else
		return mmc_host_do_disable(host, 1);
}
EXPORT_SYMBOL(mmc_host_lazy_disable);

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release a MMC host, allowing others to claim the host
 * for their operations.
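 *
 * The usual pairing with mmc_claim_host(), sketched for a
 * hypothetical caller:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	mmc_release_host(card->host);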
 */
void mmc_release_host(struct mmc_host *host)
{
	WARN_ON(!host->claimed);

	mmc_host_lazy_disable(host);

	mmc_do_release_host(host);
}

EXPORT_SYMBOL(mmc_release_host);

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * does not exceed "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		BUG_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		mmc_set_clock(host, host->clk_old);
	}
}

void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width and DDR mode of a host.
 */
void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
			   unsigned int ddr)
{
	host->ios.bus_width = width;
	host->ios.ddr = ddr;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
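 *
 * Typically called after the card itself has been switched to a wider
 * bus, e.g. (a sketch, mirroring what the SD bus handler does):
 *
 *	err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
 *	if (!err)
 *		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);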
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_set_bus_width_ddr(host, width, MMC_SDR_MODE);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);

#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
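 *
 * A sketch of typical use from a host driver's probe path; the
 * "vmmc" supply name is a common convention, not a requirement:
 *
 *	supply = regulator_get(&pdev->dev, "vmmc");
 *	if (!IS_ERR(supply)) {
 *		ret = mmc_regulator_get_ocrmask(supply);
 *		if (ret > 0)
 *			mmc->ocr_avail = ret;
 *	}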
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);
		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);

#endif /* CONFIG_REGULATOR */

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		host->ios.vdd = bit;
		mmc_set_ios(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}

/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
}

static void mmc_power_off(struct mmc_host *host)
{
	host->ios.clock = 0;
	host->ios.vdd = 0;
	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.
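 *
 * A sketch of the expected calling pattern (compare mmc_stop_host()
 * below):
 *
 *	mmc_claim_host(host);
 *	mmc_detach_bus(host);
 *	mmc_release_host(host);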
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_power_off(host);

	mmc_bus_put(host);
}

/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it
	 * can end up taking longer to erase.
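	 *
	 * Worked example (hypothetical card): a 2 GiB SD card that reports
	 * no AU size gives sz = 2048 (MiB below is in units of 1 MiB), so
	 * pref_erase starts out as 4 MiB = 8192 sectors, and is then
	 * rounded up to a multiple of erase_size if necessary.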
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}

static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
				      struct mmc_command *cmd,
				      unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (card->host->ios.clock / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	cmd->erase_timeout = erase_timeout;
}

static void mmc_set_sd_erase_timeout(struct mmc_card *card,
				     struct mmc_command *cmd, unsigned int arg,
				     unsigned int qty)
{
	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		cmd->erase_timeout = card->ssr.erase_timeout * qty +
				     card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
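		 * For example, erasing a span of 16 write blocks gives
		 * 16 * 250 = 4000 ms (and always at least 1000 ms, see
		 * below).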
		 */
		cmd->erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (cmd->erase_timeout < 1000)
		cmd->erase_timeout = 1000;
}

static void mmc_set_erase_timeout(struct mmc_card *card,
				  struct mmc_command *cmd, unsigned int arg,
				  unsigned int qty)
{
	if (mmc_card_sd(card))
		mmc_set_sd_erase_timeout(card, cmd, arg, qty);
	else
		mmc_set_mmc_erase_timeout(card, cmd, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd;
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not appear to insist on that, so we fall back
	 * to division in that case.  SD may not specify an allocation unit
	 * size, in which case the timeout is based on the number of write
	 * blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EINVAL;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EINVAL;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	mmc_set_erase_timeout(card, &cmd, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/*
		 * Do not retry else we can't
		 * see errors.
		 */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			printk(KERN_ERR "error %d requesting status %#x\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == 7);
out:
	return err;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd;

	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
pr_info("%s: %s: trying to init card at %u Hz\n", 1495 mmc_hostname(host), __func__, host->f_init); 1496 #endif 1497 mmc_power_up(host); 1498 sdio_reset(host); 1499 mmc_go_idle(host); 1500 1501 mmc_send_if_cond(host, host->ocr_avail); 1502 1503 /* Order's important: probe SDIO, then SD, then MMC */ 1504 if (!mmc_attach_sdio(host)) 1505 return 0; 1506 if (!mmc_attach_sd(host)) 1507 return 0; 1508 if (!mmc_attach_mmc(host)) 1509 return 0; 1510 1511 mmc_power_off(host); 1512 return -EIO; 1513 } 1514 1515 void mmc_rescan(struct work_struct *work) 1516 { 1517 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; 1518 struct mmc_host *host = 1519 container_of(work, struct mmc_host, detect.work); 1520 int i; 1521 1522 if (host->rescan_disable) 1523 return; 1524 1525 mmc_bus_get(host); 1526 1527 /* 1528 * if there is a _removable_ card registered, check whether it is 1529 * still present 1530 */ 1531 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead 1532 && !(host->caps & MMC_CAP_NONREMOVABLE)) 1533 host->bus_ops->detect(host); 1534 1535 /* 1536 * Let mmc_bus_put() free the bus/bus_ops if we've found that 1537 * the card is no longer present. 1538 */ 1539 mmc_bus_put(host); 1540 mmc_bus_get(host); 1541 1542 /* if there still is a card present, stop here */ 1543 if (host->bus_ops != NULL) { 1544 mmc_bus_put(host); 1545 goto out; 1546 } 1547 1548 /* 1549 * Only we can add a new handler, so it's safe to 1550 * release the lock here. 1551 */ 1552 mmc_bus_put(host); 1553 1554 if (host->ops->get_cd && host->ops->get_cd(host) == 0) 1555 goto out; 1556 1557 mmc_claim_host(host); 1558 for (i = 0; i < ARRAY_SIZE(freqs); i++) { 1559 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) 1560 break; 1561 if (freqs[i] < host->f_min) 1562 break; 1563 } 1564 mmc_release_host(host); 1565 1566 out: 1567 if (host->caps & MMC_CAP_NEEDS_POLL) 1568 mmc_schedule_delayed_work(&host->detect, HZ); 1569 } 1570 1571 void mmc_start_host(struct mmc_host *host) 1572 { 1573 mmc_power_off(host); 1574 mmc_detect_change(host, 0); 1575 } 1576 1577 void mmc_stop_host(struct mmc_host *host) 1578 { 1579 #ifdef CONFIG_MMC_DEBUG 1580 unsigned long flags; 1581 spin_lock_irqsave(&host->lock, flags); 1582 host->removed = 1; 1583 spin_unlock_irqrestore(&host->lock, flags); 1584 #endif 1585 1586 if (host->caps & MMC_CAP_DISABLE) 1587 cancel_delayed_work(&host->disable); 1588 cancel_delayed_work_sync(&host->detect); 1589 mmc_flush_scheduled_work(); 1590 1591 /* clear pm flags now and let card drivers set them as needed */ 1592 host->pm_flags = 0; 1593 1594 mmc_bus_get(host); 1595 if (host->bus_ops && !host->bus_dead) { 1596 if (host->bus_ops->remove) 1597 host->bus_ops->remove(host); 1598 1599 mmc_claim_host(host); 1600 mmc_detach_bus(host); 1601 mmc_release_host(host); 1602 mmc_bus_put(host); 1603 return; 1604 } 1605 mmc_bus_put(host); 1606 1607 BUG_ON(host->card); 1608 1609 mmc_power_off(host); 1610 } 1611 1612 int mmc_power_save_host(struct mmc_host *host) 1613 { 1614 int ret = 0; 1615 1616 mmc_bus_get(host); 1617 1618 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 1619 mmc_bus_put(host); 1620 return -EINVAL; 1621 } 1622 1623 if (host->bus_ops->power_save) 1624 ret = host->bus_ops->power_save(host); 1625 1626 mmc_bus_put(host); 1627 1628 mmc_power_off(host); 1629 1630 return ret; 1631 } 1632 EXPORT_SYMBOL(mmc_power_save_host); 1633 1634 int mmc_power_restore_host(struct mmc_host *host) 1635 { 1636 int ret; 1637 1638 mmc_bus_get(host); 1639 1640 if (!host->bus_ops || host->bus_dead || 
	    !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	/* Check the op we are about to call, not ->awake */
	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);

int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);

#ifdef CONFIG_PM

/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend)
			err = host->bus_ops->suspend(host);
		if (err == -ENOSYS || !host->bus_ops->resume) {
			/*
			 * We simply "remove" the card in this case.
			 * It will be redetected on resume.
			 */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
			host->pm_flags = 0;
			err = 0;
		}
	}
	mmc_bus_put(host);

	if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER))
		mmc_power_off(host);

	return err;
}

EXPORT_SYMBOL(mmc_suspend_host);

/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell runtime PM core we just powered up the card,
			 * since it still believes the card is powered off.
			 * Note that currently runtime PM is only enabled
			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD.
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			printk(KERN_WARNING "%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			err = 0;
		}
	}
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);

/*
 * Do the card removal on suspend if card is assumed removable.
 * Do that in pm notifier while userspace isn't yet frozen, so we will be able
 * to sync the card.
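 *
 * A sketch of how the notifier is hooked up when the host is created
 * and added (see mmc_alloc_host()/mmc_add_host() in host.c):
 *
 *	host->pm_notify.notifier_call = mmc_pm_notify;
 *	register_pm_notifier(&host->pm_notify);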
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		mmc_claim_host(host);

		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_detach_bus(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);

	}

	return 0;
}
#endif

static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");