/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries) {
		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, err);

		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	} else {
		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			 mmc_hostname(host), cmd->opcode, err,
			 cmd->resp[0], cmd->resp[1],
			 cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				 mmc_hostname(host),
				 mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->stop->opcode,
				 mrq->stop->error,
				 mrq->stop->resp[0], mrq->stop->resp[1],
				 mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_gate(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);
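/*
 * Illustrative sketch (not part of the original file): a host
 * controller driver would typically call mmc_request_done() from its
 * interrupt handler once the hardware has finished the transfer.  The
 * "foo" driver names below are hypothetical.
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_host *fh = dev_id;
 *
 *		foo_read_response(fh, fh->mrq->cmd);
 *		mmc_request_done(fh->mmc, fh->mrq);
 *		return IRQ_HANDLED;
 *	}
 */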
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			 "tsac %d ms nsac %d\n",
			 mmc_hostname(host), mrq->data->blksz,
			 mrq->data->blocks, mrq->data->flags,
			 mrq->data->timeout_ns / 1000000,
			 mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_ungate(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	mmc_start_request(host, mrq);
}

static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	wait_for_completion(&mrq->completion);
}

/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previously started request
 *                that may run in parallel to this call, otherwise false
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let the
 * host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req)
		host->ops->pre_req(host, mrq, is_first_req);
}

/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: Error, if non zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req)
		host->ops->post_req(host, mrq, err);
}
/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @error: out parameter; returns 0 for success, otherwise non zero
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for completion
 * of that request before starting the new one, then return.
 * Does not wait for the new request to complete.
 *
 * Waits for a previously started, ongoing request to complete and
 * returns the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		mmc_wait_for_req_done(host, host->areq->mrq);
		err = host->areq->err_check(host->card, host->areq);
		if (err) {
			mmc_post_req(host, host->areq->mrq, 0);
			if (areq)
				mmc_post_req(host, areq->mrq, -EINVAL);

			host->areq = NULL;
			goto out;
		}
	}

	if (areq)
		__mmc_start_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	host->areq = areq;
out:
	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
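/*
 * Illustrative sketch (hypothetical "foo" helpers): a block driver can
 * keep the controller busy by handing mmc_start_req() the next prepared
 * request while the previous one is still running; the function returns
 * the request that just completed, if any.
 *
 *	struct mmc_async_req *prev;
 *	int err;
 *
 *	foo_prepare_areq(&foo->areq, next_blk_req);
 *	prev = mmc_start_req(host, &foo->areq, &err);
 *	if (prev)
 *		foo_finish_blk_req(prev, err);
 */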
/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete.  Return any error that occurred while the command
 * was executing.  Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {0};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);

/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The limit is really 250 ms, but that is
			 * insufficient for some crappy cards.
			 */
			limit_us = 300000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}
	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
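/*
 * Worked example with illustrative numbers: for an SD card write with
 * csd.tacc_ns = 1000000 (1 ms), csd.tacc_clks = 0 and csd.r2w_factor = 2,
 * the function above computes mult = 100 << 2 = 400, giving
 * timeout_ns = 400 ms.  That exceeds the 300 ms write limit, so the
 * result is clamped to data->timeout_ns = 300 ms, timeout_clks = 0.
 */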
/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

/**
 * mmc_host_enable - enable a host.
 * @host: mmc host to enable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_enable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (host->nesting_cnt++)
		return 0;

	cancel_delayed_work_sync(&host->disable);

	if (host->enabled)
		return 0;

	if (host->ops->enable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->enable(host);
		host->en_dis_recurs = 0;

		if (err) {
			pr_debug("%s: enable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
	}
	host->enabled = 1;
	return 0;
}
EXPORT_SYMBOL(mmc_host_enable);

static int mmc_host_do_disable(struct mmc_host *host, int lazy)
{
	if (host->ops->disable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->disable(host, lazy);
		host->en_dis_recurs = 0;

		if (err < 0) {
			pr_debug("%s: disable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
		if (err > 0) {
			unsigned long delay = msecs_to_jiffies(err);

			mmc_schedule_delayed_work(&host->disable, delay);
		}
	}
	host->enabled = 0;
	return 0;
}

/**
 * mmc_host_disable - disable a host.
 * @host: mmc host to disable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_disable(struct mmc_host *host)
{
	int err;

	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	err = mmc_host_do_disable(host, 0);
	return err;
}
EXPORT_SYMBOL(mmc_host_disable);

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value, this will return prematurely with
 * that non-zero value without acquiring the lock. Otherwise it
 * returns zero with the lock held.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (!stop)
		mmc_host_enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);
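/*
 * Illustrative sketch: callers bracket command sequences with
 * mmc_claim_host()/mmc_release_host(); mmc_claim_host() (declared in
 * <linux/mmc/core.h>) is __mmc_claim_host() with no abort condition.
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *	mmc_release_host(card->host);
 */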
/**
 * mmc_try_claim_host - try to claim a host exclusively
 * @host: mmc host to claim
 *
 * Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);

/**
 * mmc_do_release_host - release a claimed host
 * @host: mmc host to release
 *
 * If you successfully claimed a host, this function will
 * release it again.
 */
void mmc_do_release_host(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_do_release_host);

void mmc_host_deeper_disable(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, disable.work);

	/* If the host is claimed then we do not want to disable it anymore */
	if (!mmc_try_claim_host(host))
		return;
	mmc_host_do_disable(host, 1);
	mmc_do_release_host(host);
}

/**
 * mmc_host_lazy_disable - lazily disable a host.
 * @host: mmc host to disable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_lazy_disable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	if (host->disable_delay) {
		mmc_schedule_delayed_work(&host->disable,
				msecs_to_jiffies(host->disable_delay));
		return 0;
	} else
		return mmc_host_do_disable(host, 1);
}
EXPORT_SYMBOL(mmc_host_lazy_disable);

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release a MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	WARN_ON(!host->claimed);

	mmc_host_lazy_disable(host);

	mmc_do_release_host(host);
}

EXPORT_SYMBOL(mmc_release_host);
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		 "width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		BUG_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		mmc_set_clock(host, host->clk_old);
	}
}

void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}
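/*
 * Worked example: with @low_bits = false, 3300 mV gives
 * bit = (3300 - 2000) / 100 + 8 = 21 = ilog2(MMC_VDD_33_34); with
 * @low_bits = true, vdd is first decremented to 3299 mV, giving
 * bit = 20 = ilog2(MMC_VDD_32_33).
 */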
/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);

#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
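/*
 * Illustrative sketch (hypothetical probe code): a host driver would
 * normally derive its OCR mask from a "vmmc" supply at probe time.
 *
 *	struct regulator *supply = regulator_get(&pdev->dev, "vmmc");
 *	int ocr;
 *
 *	if (!IS_ERR(supply)) {
 *		ocr = mmc_regulator_get_ocrmask(supply);
 *		if (ocr > 0)
 *			mmc->ocr_avail = ocr;
 *	}
 */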
/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			  struct regulator *supply,
			  unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);
		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);

#endif /* CONFIG_REGULATOR */

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		host->ios.vdd = bit;
		mmc_set_ios(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
			   mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}
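/*
 * Illustrative sketch: the bus attach paths use mmc_select_voltage()
 * to negotiate the operating voltage from the OCR the card reports,
 * treating an empty result as "no usable voltage".
 *
 *	host->ocr = mmc_select_voltage(host, ocr);
 *	if (!host->ocr) {
 *		err = -EINVAL;
 *		goto err;
 *	}
 */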
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
{
	struct mmc_command cmd = {0};
	int err = 0;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
		cmd.opcode = SD_SWITCH_VOLTAGE;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			return err;

		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
			return -EIO;
	}

	host->ios.signal_voltage = signal_voltage;

	if (host->ops->start_signal_voltage_switch)
		err = host->ops->start_signal_voltage_switch(host, &host->ios);

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
}

/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
}

static void mmc_power_off(struct mmc_host *host)
{
	host->ios.clock = 0;
	host->ios.vdd = 0;

	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_power_off(host);

	mmc_bus_put(host);
}

/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
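/*
 * Illustrative sketch (hypothetical handler name): a host driver with
 * a card-detect GPIO usually debounces the event by passing a delay.
 *
 *	static irqreturn_t foo_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */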
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}

static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
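/*
 * Worked example with illustrative numbers: with erase_group_def set
 * and ext_csd.hc_erase_timeout = 300 ms, erasing qty = 4 erase groups
 * with arg == MMC_ERASE_ARG yields erase_timeout = 300 * 4 = 1200 ms.
 */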
static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case.  SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EINVAL;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EINVAL;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			printk(KERN_ERR "error %d requesting status %#x\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == 7);
out:
	return err;
}
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
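/*
 * Illustrative sketch: a discard path checks capability and alignment
 * first, then erases with the host claimed, as mmc_erase() requires.
 *
 *	if (mmc_can_erase(card) &&
 *	    mmc_erase_group_aligned(card, from, nr)) {
 *		mmc_claim_host(card->host);
 *		err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 *		mmc_release_host(card->host);
 *	}
 */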
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_discard_to)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency, which can change.  In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_discard_to);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);
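/*
 * Illustrative sketch: block drivers issue this once before doing
 * block I/O; for block-addressed (SDHC/eMMC high capacity) or DDR
 * cards the function above is a no-op and returns 0.
 *
 *	err = mmc_set_blocklen(card, 512);
 *	if (err)
 *		goto fail;
 */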
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host);

	/*
	 * sdio_reset sends CMD52 to reset card.  Since we do not know
	 * if the card is being re-initialized, just send it.  CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}

void mmc_rescan(struct work_struct *work)
{
	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);
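/*
 * Illustrative sketch: a suspend path can put an eMMC device into its
 * sleep state when the card supports it (see mmc_card_can_sleep()
 * below).
 *
 *	if (mmc_card_can_sleep(host))
 *		err = mmc_card_sleep(host);
 */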
int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);

#ifdef CONFIG_PM

/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend)
			err = host->bus_ops->suspend(host);
		if (err == -ENOSYS || !host->bus_ops->resume) {
			/*
			 * We simply "remove" the card in this case.
			 * It will be redetected on resume.
			 */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
			host->pm_flags = 0;
			err = 0;
		}
	}
	mmc_bus_put(host);

	if (!err && !mmc_card_keep_power(host))
		mmc_power_off(host);

	return err;
}

EXPORT_SYMBOL(mmc_suspend_host);

/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell runtime PM core we just powered up the card,
			 * since it still believes the card is powered off.
			 * Note that currently runtime PM is only enabled
			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			printk(KERN_WARNING "%s: error %d during resume "
					    "(card was removed?)\n",
			       mmc_hostname(host), err);
			err = 0;
		}
	}
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);
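/*
 * Illustrative sketch (hypothetical "foo" platform driver): host
 * drivers of this era forward system PM callbacks to the core.
 *
 *	static int foo_suspend(struct platform_device *pdev, pm_message_t state)
 *	{
 *		struct mmc_host *mmc = platform_get_drvdata(pdev);
 *
 *		return mmc_suspend_host(mmc);
 *	}
 *
 *	static int foo_resume(struct platform_device *pdev)
 *	{
 *		struct mmc_host *mmc = platform_get_drvdata(pdev);
 *
 *		return mmc_resume_host(mmc);
 *	}
 */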
/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in the pm notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		mmc_claim_host(host);

		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_detach_bus(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);

	}

	return 0;
}
#endif

static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");