/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(removable,
        "MMC/SD cards are removable and may be removed during suspend");

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
                                     unsigned long delay)
{
        return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
        flush_workqueue(workqueue);
}

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
        struct mmc_command *cmd = mrq->cmd;
        int err = cmd->error;

        if (err && cmd->retries && mmc_host_is_spi(host)) {
                if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
                        cmd->retries = 0;
        }

        if (err && cmd->retries) {
                pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
                        mmc_hostname(host), cmd->opcode, err);

                cmd->retries--;
                cmd->error = 0;
                host->ops->request(host, mrq);
        } else {
                led_trigger_event(host->led, LED_OFF);

                pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
                        mmc_hostname(host), cmd->opcode, err,
                        cmd->resp[0], cmd->resp[1],
                        cmd->resp[2], cmd->resp[3]);

                if (mrq->data) {
                        pr_debug("%s: %d bytes transferred: %d\n",
                                mmc_hostname(host),
                                mrq->data->bytes_xfered, mrq->data->error);
                }

                if (mrq->stop) {
                        pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
                                mmc_hostname(host), mrq->stop->opcode,
                                mrq->stop->error,
                                mrq->stop->resp[0], mrq->stop->resp[1],
                                mrq->stop->resp[2], mrq->stop->resp[3]);
                }

                if (mrq->done)
                        mrq->done(mrq);

                mmc_host_clk_gate(host);
        }
}

EXPORT_SYMBOL(mmc_request_done);
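/*
 * Usage sketch (illustrative only, not part of this file): a host
 * controller driver typically calls mmc_request_done() from its
 * interrupt handler once the hardware has finished the transfer.
 * The foo_* names below are hypothetical.
 *
 *      static irqreturn_t foo_irq(int irq, void *dev_id)
 *      {
 *              struct foo_host *fh = dev_id;
 *              struct mmc_request *mrq = fh->mrq;
 *
 *              if (foo_xfer_finished(fh)) {
 *                      fh->mrq = NULL;
 *                      mmc_request_done(fh->mmc, mrq);
 *              }
 *              return IRQ_HANDLED;
 *      }
 */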
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned int i, sz;
        struct scatterlist *sg;
#endif

        pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
                 mmc_hostname(host), mrq->cmd->opcode,
                 mrq->cmd->arg, mrq->cmd->flags);

        if (mrq->data) {
                pr_debug("%s: blksz %d blocks %d flags %08x "
                        "tsac %d ms nsac %d\n",
                        mmc_hostname(host), mrq->data->blksz,
                        mrq->data->blocks, mrq->data->flags,
                        mrq->data->timeout_ns / 1000000,
                        mrq->data->timeout_clks);
        }

        if (mrq->stop) {
                pr_debug("%s: CMD%u arg %08x flags %08x\n",
                         mmc_hostname(host), mrq->stop->opcode,
                         mrq->stop->arg, mrq->stop->flags);
        }

        WARN_ON(!host->claimed);

        mrq->cmd->error = 0;
        mrq->cmd->mrq = mrq;
        if (mrq->data) {
                BUG_ON(mrq->data->blksz > host->max_blk_size);
                BUG_ON(mrq->data->blocks > host->max_blk_count);
                BUG_ON(mrq->data->blocks * mrq->data->blksz >
                        host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
                sz = 0;
                for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
                        sz += sg->length;
                BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

                mrq->cmd->data = mrq->data;
                mrq->data->error = 0;
                mrq->data->mrq = mrq;
                if (mrq->stop) {
                        mrq->data->stop = mrq->stop;
                        mrq->stop->error = 0;
                        mrq->stop->mrq = mrq;
                }
        }
        mmc_host_clk_ungate(host);
        led_trigger_event(host->led, LED_FULL);
        host->ops->request(host, mrq);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
        complete(&mrq->completion);
}

static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
        init_completion(&mrq->completion);
        mrq->done = mmc_wait_done;
        mmc_start_request(host, mrq);
}

static void mmc_wait_for_req_done(struct mmc_host *host,
                                  struct mmc_request *mrq)
{
        wait_for_completion(&mrq->completion);
}

/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previous started request
 *                that may run in parallel to this call, otherwise false
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let the
 * host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
                        bool is_first_req)
{
        if (host->ops->pre_req)
                host->ops->pre_req(host, mrq, is_first_req);
}

/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: Error, if non-zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
                         int err)
{
        if (host->ops->post_req)
                host->ops->post_req(host, mrq, err);
}

/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @error: out parameter returns 0 for success, otherwise non zero
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for completion
 * of that request before starting the new one, then return.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, or NULL if none completed: the
 * function waits for a previously started ongoing request to
 * complete and returns it. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
                                    struct mmc_async_req *areq, int *error)
{
        int err = 0;
        struct mmc_async_req *data = host->areq;

        /* Prepare a new request */
        if (areq)
                mmc_pre_req(host, areq->mrq, !host->areq);

        if (host->areq) {
                mmc_wait_for_req_done(host, host->areq->mrq);
                err = host->areq->err_check(host->card, host->areq);
                if (err) {
                        mmc_post_req(host, host->areq->mrq, 0);
                        if (areq)
                                mmc_post_req(host, areq->mrq, -EINVAL);

                        host->areq = NULL;
                        goto out;
                }
        }

        if (areq)
                __mmc_start_req(host, areq->mrq);

        if (host->areq)
                mmc_post_req(host, host->areq->mrq, 0);

        host->areq = areq;
out:
        if (error)
                *error = err;
        return data;
}
EXPORT_SYMBOL(mmc_start_req);
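/*
 * Usage sketch (illustrative only, not part of this file): a block
 * driver can pipeline transfers by always having the next request
 * prepared while the current one runs. The rqc/prev names are
 * hypothetical.
 *
 *      struct mmc_async_req *prev;
 *      int error;
 *
 *      // Starts rqc->mrq and returns the request that just finished
 *      // (NULL on the first iteration). The pre_req/post_req hooks
 *      // let the host map/unmap DMA buffers in parallel with the
 *      // ongoing transfer.
 *      prev = mmc_start_req(host, rqc, &error);
 *      if (error)
 *              ; // handle the failure reported by prev->err_check()
 */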
/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
        __mmc_start_req(host, mrq);
        mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
        struct mmc_request mrq = {0};

        WARN_ON(!host->claimed);

        memset(cmd->resp, 0, sizeof(cmd->resp));
        cmd->retries = retries;

        mrq.cmd = cmd;
        cmd->data = NULL;

        mmc_wait_for_req(host, &mrq);

        return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);

/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
        unsigned int mult;

        /*
         * SDIO cards only define an upper 1 s limit on access.
         */
        if (mmc_card_sdio(card)) {
                data->timeout_ns = 1000000000;
                data->timeout_clks = 0;
                return;
        }

        /*
         * SD cards use a 100 multiplier rather than 10
         */
        mult = mmc_card_sd(card) ? 100 : 10;

        /*
         * Scale up the multiplier (and therefore the timeout) by
         * the r2w factor for writes.
         */
        if (data->flags & MMC_DATA_WRITE)
                mult <<= card->csd.r2w_factor;

        data->timeout_ns = card->csd.tacc_ns * mult;
        data->timeout_clks = card->csd.tacc_clks * mult;

        /*
         * SD cards also have an upper limit on the timeout.
         */
        if (mmc_card_sd(card)) {
                unsigned int timeout_us, limit_us;

                timeout_us = data->timeout_ns / 1000;
                if (mmc_host_clk_rate(card->host))
                        timeout_us += data->timeout_clks * 1000 /
                                (mmc_host_clk_rate(card->host) / 1000);

                if (data->flags & MMC_DATA_WRITE)
                        /*
                         * The limit is really 250 ms, but that is
                         * insufficient for some crappy cards.
                         */
                        limit_us = 300000;
                else
                        limit_us = 100000;

                /*
                 * SDHC cards always use these fixed values.
                 */
                if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
                        data->timeout_ns = limit_us * 1000;
                        data->timeout_clks = 0;
                }
        }
        /*
         * Some cards need very high timeouts if driven in SPI mode.
         * The worst observed timeout was 900ms after writing a
         * continuous stream of data until the internal logic
         * overflowed.
         */
        if (mmc_host_is_spi(card->host)) {
                if (data->flags & MMC_DATA_WRITE) {
                        if (data->timeout_ns < 1000000000)
                                data->timeout_ns = 1000000000;  /* 1s */
                } else {
                        if (data->timeout_ns < 100000000)
                                data->timeout_ns = 100000000;   /* 100ms */
                }
        }
}
EXPORT_SYMBOL(mmc_set_data_timeout);
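/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * caller fills in an mmc_data descriptor and lets the core compute
 * the timeout before issuing the request. The sector/nr_blocks/sg
 * values are hypothetical.
 *
 *      struct mmc_request mrq = {0};
 *      struct mmc_command cmd = {0};
 *      struct mmc_data data = {0};
 *
 *      cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
 *      cmd.arg = sector;
 *      cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *      data.blksz = 512;
 *      data.blocks = nr_blocks;
 *      data.flags = MMC_DATA_READ;
 *      data.sg = sg;
 *      data.sg_len = sg_len;
 *      mmc_set_data_timeout(&data, card);
 *
 *      mrq.cmd = &cmd;
 *      mrq.data = &data;
 *      mmc_wait_for_req(card->host, &mrq);
 */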
/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
        /*
         * FIXME: We don't have a system for the controller to tell
         * the core about its problems yet, so for now we just 32-bit
         * align the size.
         */
        sz = ((sz + 3) / 4) * 4;

        return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

/**
 * mmc_host_enable - enable a host.
 * @host: mmc host to enable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_enable(struct mmc_host *host)
{
        if (!(host->caps & MMC_CAP_DISABLE))
                return 0;

        if (host->en_dis_recurs)
                return 0;

        if (host->nesting_cnt++)
                return 0;

        cancel_delayed_work_sync(&host->disable);

        if (host->enabled)
                return 0;

        if (host->ops->enable) {
                int err;

                host->en_dis_recurs = 1;
                err = host->ops->enable(host);
                host->en_dis_recurs = 0;

                if (err) {
                        pr_debug("%s: enable error %d\n",
                                 mmc_hostname(host), err);
                        return err;
                }
        }
        host->enabled = 1;
        return 0;
}
EXPORT_SYMBOL(mmc_host_enable);

static int mmc_host_do_disable(struct mmc_host *host, int lazy)
{
        if (host->ops->disable) {
                int err;

                host->en_dis_recurs = 1;
                err = host->ops->disable(host, lazy);
                host->en_dis_recurs = 0;

                if (err < 0) {
                        pr_debug("%s: disable error %d\n",
                                 mmc_hostname(host), err);
                        return err;
                }
                if (err > 0) {
                        unsigned long delay = msecs_to_jiffies(err);

                        mmc_schedule_delayed_work(&host->disable, delay);
                }
        }
        host->enabled = 0;
        return 0;
}

/**
 * mmc_host_disable - disable a host.
 * @host: mmc host to disable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_disable(struct mmc_host *host)
{
        int err;

        if (!(host->caps & MMC_CAP_DISABLE))
                return 0;

        if (host->en_dis_recurs)
                return 0;

        if (--host->nesting_cnt)
                return 0;

        if (!host->enabled)
                return 0;

        err = mmc_host_do_disable(host, 0);
        return err;
}
EXPORT_SYMBOL(mmc_host_disable);
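/*
 * Usage sketch (illustrative only, not part of this file): enable and
 * disable calls nest via host->nesting_cnt, so balanced pairs are
 * required; only the outermost pair actually touches the hardware.
 *
 *      mmc_host_enable(host);          // nesting_cnt 0 -> 1, powers up
 *      mmc_host_enable(host);          // nesting_cnt 1 -> 2, no-op
 *      mmc_host_disable(host);         // nesting_cnt 2 -> 1, no-op
 *      mmc_host_disable(host);         // nesting_cnt 1 -> 0, powers down
 */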
/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value, this will return prematurely with
 * that non-zero value without acquiring the lock. Otherwise it returns
 * zero with the lock held.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long flags;
        int stop;

        might_sleep();

        add_wait_queue(&host->wq, &wait);
        spin_lock_irqsave(&host->lock, flags);
        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                stop = abort ? atomic_read(abort) : 0;
                if (stop || !host->claimed || host->claimer == current)
                        break;
                spin_unlock_irqrestore(&host->lock, flags);
                schedule();
                spin_lock_irqsave(&host->lock, flags);
        }
        set_current_state(TASK_RUNNING);
        if (!stop) {
                host->claimed = 1;
                host->claimer = current;
                host->claim_cnt += 1;
        } else
                wake_up(&host->wq);
        spin_unlock_irqrestore(&host->lock, flags);
        remove_wait_queue(&host->wq, &wait);
        if (!stop)
                mmc_host_enable(host);
        return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_try_claim_host - try exclusively to claim a host
 * @host: mmc host to claim
 *
 * Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
        int claimed_host = 0;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (!host->claimed || host->claimer == current) {
                host->claimed = 1;
                host->claimer = current;
                host->claim_cnt += 1;
                claimed_host = 1;
        }
        spin_unlock_irqrestore(&host->lock, flags);
        return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);

/**
 * mmc_do_release_host - release a claimed host
 * @host: mmc host to release
 *
 * If you successfully claimed a host, this function will
 * release it again.
 */
void mmc_do_release_host(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (--host->claim_cnt) {
                /* Release for nested claim */
                spin_unlock_irqrestore(&host->lock, flags);
        } else {
                host->claimed = 0;
                host->claimer = NULL;
                spin_unlock_irqrestore(&host->lock, flags);
                wake_up(&host->wq);
        }
}
EXPORT_SYMBOL(mmc_do_release_host);

void mmc_host_deeper_disable(struct work_struct *work)
{
        struct mmc_host *host =
                container_of(work, struct mmc_host, disable.work);

        /* If the host is claimed then we do not want to disable it anymore */
        if (!mmc_try_claim_host(host))
                return;
        mmc_host_do_disable(host, 1);
        mmc_do_release_host(host);
}

/**
 * mmc_host_lazy_disable - lazily disable a host.
 * @host: mmc host to disable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_lazy_disable(struct mmc_host *host)
{
        if (!(host->caps & MMC_CAP_DISABLE))
                return 0;

        if (host->en_dis_recurs)
                return 0;

        if (--host->nesting_cnt)
                return 0;

        if (!host->enabled)
                return 0;

        if (host->disable_delay) {
                mmc_schedule_delayed_work(&host->disable,
                                msecs_to_jiffies(host->disable_delay));
                return 0;
        } else
                return mmc_host_do_disable(host, 1);
}
EXPORT_SYMBOL(mmc_host_lazy_disable);

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release a MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
        WARN_ON(!host->claimed);

        mmc_host_lazy_disable(host);

        mmc_do_release_host(host);
}

EXPORT_SYMBOL(mmc_release_host);
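/*
 * Usage sketch (illustrative only, not part of this file): callers
 * bracket command sequences with claim/release so only one context
 * talks to the card at a time (mmc_claim_host() is the wrapper around
 * __mmc_claim_host() with a NULL abort pointer):
 *
 *      mmc_claim_host(card->host);
 *      err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *      mmc_release_host(card->host);
 */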
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
        struct mmc_ios *ios = &host->ios;

        pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
                "width %u timing %u\n",
                 mmc_hostname(host), ios->clock, ios->bus_mode,
                 ios->power_mode, ios->chip_select, ios->vdd,
                 ios->bus_width, ios->timing);

        if (ios->clock > 0)
                mmc_set_ungated(host);
        host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
        host->ios.chip_select = mode;
        mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
        WARN_ON(hz < host->f_min);

        if (hz > host->f_max)
                hz = host->f_max;

        host->ios.clock = hz;
        mmc_set_ios(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->clk_lock, flags);
        host->clk_old = host->ios.clock;
        host->ios.clock = 0;
        host->clk_gated = true;
        spin_unlock_irqrestore(&host->clk_lock, flags);
        mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
        /*
         * We should previously have gated the clock, so the clock shall
         * be 0 here! The clock may however be 0 during initialization,
         * when some request operations are performed before setting
         * the frequency. When ungate is requested in that situation
         * we just ignore the call.
         */
        if (host->clk_old) {
                BUG_ON(host->ios.clock);
                /* This call will also set host->clk_gated to false */
                mmc_set_clock(host, host->clk_old);
        }
}

void mmc_set_ungated(struct mmc_host *host)
{
        unsigned long flags;

        /*
         * We've been given a new frequency while the clock is gated,
         * so make sure we regard this as ungating it.
         */
        spin_lock_irqsave(&host->clk_lock, flags);
        host->clk_gated = false;
        spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
        host->ios.bus_mode = mode;
        mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
        host->ios.bus_width = width;
        mmc_set_ios(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
        const int max_bit = ilog2(MMC_VDD_35_36);
        int bit;

        if (vdd < 1650 || vdd > 3600)
                return -EINVAL;

        if (vdd >= 1650 && vdd <= 1950)
                return ilog2(MMC_VDD_165_195);

        if (low_bits)
                vdd -= 1;

        /* Base 2000 mV, step 100 mV, bit's base 8. */
        bit = (vdd - 2000) / 100 + 8;
        if (bit > max_bit)
                return max_bit;
        return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
        u32 mask = 0;

        if (vdd_max < vdd_min)
                return 0;

        /* Prefer high bits for the boundary vdd_max values. */
        vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
        if (vdd_max < 0)
                return 0;

        /* Prefer low bits for the boundary vdd_min values. */
        vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
        if (vdd_min < 0)
                return 0;

        /* Fill the mask, from max bit to min bit. */
        while (vdd_max >= vdd_min)
                mask |= 1 << vdd_max--;

        return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
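/*
 * Worked example (illustrative only, not part of this file): for a
 * board whose slot is fed by a 3.3V..3.4V rail, a host driver might
 * set
 *
 *      host->ocr_avail = mmc_vddrange_to_ocrmask(3300, 3400);
 *
 * vdd_max = 3400 maps (high bits preferred) to (3400 - 2000) / 100 + 8
 * = bit 22 (MMC_VDD_34_35); vdd_min = 3300 maps (low bits preferred,
 * i.e. computed as 3299) to bit 20 (MMC_VDD_32_33). The resulting mask
 * is MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35, as documented above.
 */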
#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
        int result = 0;
        int count;
        int i;

        count = regulator_count_voltages(supply);
        if (count < 0)
                return count;

        for (i = 0; i < count; i++) {
                int vdd_uV;
                int vdd_mV;

                vdd_uV = regulator_list_voltage(supply, i);
                if (vdd_uV <= 0)
                        continue;

                vdd_mV = vdd_uV / 1000;
                result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
        }

        return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
                          struct regulator *supply,
                          unsigned short vdd_bit)
{
        int result = 0;
        int min_uV, max_uV;

        if (vdd_bit) {
                int tmp;
                int voltage;

                /* REVISIT mmc_vddrange_to_ocrmask() may have set some
                 * bits this regulator doesn't quite support ... don't
                 * be too picky, most cards and regulators are OK with
                 * a 0.1V range goof (it's a small error percentage).
                 */
                tmp = vdd_bit - ilog2(MMC_VDD_165_195);
                if (tmp == 0) {
                        min_uV = 1650 * 1000;
                        max_uV = 1950 * 1000;
                } else {
                        min_uV = 1900 * 1000 + tmp * 100 * 1000;
                        max_uV = min_uV + 100 * 1000;
                }

                /* avoid needless changes to this voltage; the regulator
                 * might not allow this operation
                 */
                voltage = regulator_get_voltage(supply);
                if (voltage < 0)
                        result = voltage;
                else if (voltage < min_uV || voltage > max_uV)
                        result = regulator_set_voltage(supply, min_uV, max_uV);
                else
                        result = 0;

                if (result == 0 && !mmc->regulator_enabled) {
                        result = regulator_enable(supply);
                        if (!result)
                                mmc->regulator_enabled = true;
                }
        } else if (mmc->regulator_enabled) {
                result = regulator_disable(supply);
                if (result == 0)
                        mmc->regulator_enabled = false;
        }

        if (result)
                dev_err(mmc_dev(mmc),
                        "could not set regulator OCR (%d)\n", result);
        return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);

#endif /* CONFIG_REGULATOR */
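/*
 * Usage sketch (illustrative only, not part of this file): a host
 * driver supplied by a "vmmc" regulator typically wires both helpers
 * up as below; the foo_* names are hypothetical.
 *
 *      // at probe time
 *      foo->vcc = regulator_get(dev, "vmmc");
 *      mmc->ocr_avail = mmc_regulator_get_ocrmask(foo->vcc);
 *
 *      // in the set_ios() callback
 *      static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *      {
 *              struct foo_host *foo = mmc_priv(mmc);
 *
 *              mmc_regulator_set_ocr(mmc, foo->vcc, ios->vdd);
 *      }
 */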
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
        int bit;

        ocr &= host->ocr_avail;

        bit = ffs(ocr);
        if (bit) {
                bit -= 1;

                ocr &= 3 << bit;

                host->ios.vdd = bit;
                mmc_set_ios(host);
        } else {
                pr_warning("%s: host doesn't support card's voltages\n",
                           mmc_hostname(host));
                ocr = 0;
        }

        return ocr;
}
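/*
 * Worked example (illustrative only, hypothetical values): a card
 * reporting ocr = 0x00ff8000 (2.7V-3.6V) on a host with
 * ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 (0x00300000):
 *
 *      ocr &= host->ocr_avail;     // 0x00300000
 *      bit = ffs(ocr) - 1;         // 20, the lowest supported voltage
 *      ocr &= 3 << bit;            // keep bits 20 and 21
 *      host->ios.vdd = 20;         // MMC_VDD_32_33
 */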
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
{
        struct mmc_command cmd = {0};
        int err = 0;

        BUG_ON(!host);

        /*
         * Send CMD11 only if the request is to switch the card to
         * 1.8V signalling.
         */
        if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
                cmd.opcode = SD_SWITCH_VOLTAGE;
                cmd.arg = 0;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

                err = mmc_wait_for_cmd(host, &cmd, 0);
                if (err)
                        return err;

                if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
                        return -EIO;
        }

        host->ios.signal_voltage = signal_voltage;

        if (host->ops->start_signal_voltage_switch)
                err = host->ops->start_signal_voltage_switch(host, &host->ios);

        return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
        host->ios.timing = timing;
        mmc_set_ios(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
        host->ios.drv_type = drv_type;
        mmc_set_ios(host);
}

/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
        int bit;

        /* If ocr is set, we use it */
        if (host->ocr)
                bit = ffs(host->ocr) - 1;
        else
                bit = fls(host->ocr_avail) - 1;

        host->ios.vdd = bit;
        if (mmc_host_is_spi(host)) {
                host->ios.chip_select = MMC_CS_HIGH;
                host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
        } else {
                host->ios.chip_select = MMC_CS_DONTCARE;
                host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
        }
        host->ios.power_mode = MMC_POWER_UP;
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);

        /*
         * This delay should be sufficient to allow the power supply
         * to reach the minimum voltage.
         */
        mmc_delay(10);

        host->ios.clock = host->f_init;

        host->ios.power_mode = MMC_POWER_ON;
        mmc_set_ios(host);

        /*
         * This delay must be at least 74 clock cycles, or 1 ms, or the
         * time required to reach a stable voltage.
         */
        mmc_delay(10);
}

static void mmc_power_off(struct mmc_host *host)
{
        host->ios.clock = 0;
        host->ios.vdd = 0;

        /*
         * Reset ocr mask to be the highest possible voltage supported for
         * this mmc host. This value will be used at next power up.
         */
        host->ocr = 1 << (fls(host->ocr_avail) - 1);

        if (!mmc_host_is_spi(host)) {
                host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
                host->ios.chip_select = MMC_CS_DONTCARE;
        }
        host->ios.power_mode = MMC_POWER_OFF;
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
        BUG_ON(!host);
        BUG_ON(host->bus_refs);
        BUG_ON(!host->bus_dead);

        host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->bus_refs++;
        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->bus_refs--;
        if ((host->bus_refs == 0) && host->bus_ops)
                __mmc_release_bus(host);
        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
        unsigned long flags;

        BUG_ON(!host);
        BUG_ON(!ops);

        WARN_ON(!host->claimed);

        spin_lock_irqsave(&host->lock, flags);

        BUG_ON(host->bus_ops);
        BUG_ON(host->bus_refs);

        host->bus_ops = ops;
        host->bus_refs = 1;
        host->bus_dead = 0;

        spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.
 */
void mmc_detach_bus(struct mmc_host *host)
{
        unsigned long flags;

        BUG_ON(!host);

        WARN_ON(!host->claimed);
        WARN_ON(!host->bus_ops);

        spin_lock_irqsave(&host->lock, flags);

        host->bus_dead = 1;

        spin_unlock_irqrestore(&host->lock, flags);

        mmc_power_off(host);

        mmc_bus_put(host);
}

/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned long flags;
        spin_lock_irqsave(&host->lock, flags);
        WARN_ON(host->removed);
        spin_unlock_irqrestore(&host->lock, flags);
#endif

        mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
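/*
 * Usage sketch (illustrative only, not part of this file): a host
 * driver with a card-detect GPIO usually calls mmc_detect_change()
 * from the detect interrupt, with a short delay to let the contacts
 * settle. The foo_* names are hypothetical.
 *
 *      static irqreturn_t foo_cd_irq(int irq, void *dev_id)
 *      {
 *              struct foo_host *fh = dev_id;
 *
 *              mmc_detect_change(fh->mmc, msecs_to_jiffies(200));
 *              return IRQ_HANDLED;
 *      }
 */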
void mmc_init_erase(struct mmc_card *card)
{
        unsigned int sz;

        if (is_power_of_2(card->erase_size))
                card->erase_shift = ffs(card->erase_size) - 1;
        else
                card->erase_shift = 0;

        /*
         * It is possible to erase an arbitrarily large area of an SD or MMC
         * card.  That is not desirable because it can take a long time
         * (minutes) potentially delaying more important I/O, and also the
         * timeout calculations become increasingly over-estimated.
         * Consequently, 'pref_erase' is defined as a guide to limit erases
         * to that size and alignment.
         *
         * For SD cards that define Allocation Unit size, limit erases to one
         * Allocation Unit at a time. For MMC cards that define High Capacity
         * Erase Size, whether it is switched on or not, limit to that size.
         * Otherwise just have a stab at a good value.  For modern cards it
         * will end up being 4MiB.  Note that if the value is too small, it
         * can end up taking longer to erase.
         */
        if (mmc_card_sd(card) && card->ssr.au) {
                card->pref_erase = card->ssr.au;
                card->erase_shift = ffs(card->ssr.au) - 1;
        } else if (card->ext_csd.hc_erase_size) {
                card->pref_erase = card->ext_csd.hc_erase_size;
        } else {
                sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
                if (sz < 128)
                        card->pref_erase = 512 * 1024 / 512;
                else if (sz < 512)
                        card->pref_erase = 1024 * 1024 / 512;
                else if (sz < 1024)
                        card->pref_erase = 2 * 1024 * 1024 / 512;
                else
                        card->pref_erase = 4 * 1024 * 1024 / 512;
                if (card->pref_erase < card->erase_size)
                        card->pref_erase = card->erase_size;
                else {
                        sz = card->pref_erase % card->erase_size;
                        if (sz)
                                card->pref_erase += card->erase_size - sz;
                }
        }
}
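/*
 * Worked example (illustrative only, hypothetical numbers): for a card
 * with erase_size = 768 sectors and no AU or HC erase size, a large
 * capacity selects pref_erase = 4 MiB / 512 = 8192 sectors. Since
 * 8192 % 768 = 512, pref_erase is rounded up by 768 - 512 = 256 to
 * 8448 sectors, the next multiple of the erase group size.
 */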
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
                                          unsigned int arg, unsigned int qty)
{
        unsigned int erase_timeout;

        if (card->ext_csd.erase_group_def & 1) {
                /* High Capacity Erase Group Size uses HC timeouts */
                if (arg == MMC_TRIM_ARG)
                        erase_timeout = card->ext_csd.trim_timeout;
                else
                        erase_timeout = card->ext_csd.hc_erase_timeout;
        } else {
                /* CSD Erase Group Size uses write timeout */
                unsigned int mult = (10 << card->csd.r2w_factor);
                unsigned int timeout_clks = card->csd.tacc_clks * mult;
                unsigned int timeout_us;

                /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
                if (card->csd.tacc_ns < 1000000)
                        timeout_us = (card->csd.tacc_ns * mult) / 1000;
                else
                        timeout_us = (card->csd.tacc_ns / 1000) * mult;

                /*
                 * ios.clock is only a target.  The real clock rate might be
                 * less but not that much less, so fudge it by multiplying by 2.
                 */
                timeout_clks <<= 1;
                timeout_us += (timeout_clks * 1000) /
                              (mmc_host_clk_rate(card->host) / 1000);

                erase_timeout = timeout_us / 1000;

                /*
                 * Theoretically, the calculation could underflow so round up
                 * to 1ms in that case.
                 */
                if (!erase_timeout)
                        erase_timeout = 1;
        }

        /* Multiplier for secure operations */
        if (arg & MMC_SECURE_ARGS) {
                if (arg == MMC_SECURE_ERASE_ARG)
                        erase_timeout *= card->ext_csd.sec_erase_mult;
                else
                        erase_timeout *= card->ext_csd.sec_trim_mult;
        }

        erase_timeout *= qty;

        /*
         * Ensure at least a 1 second timeout for SPI as per
         * 'mmc_set_data_timeout()'
         */
        if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
                erase_timeout = 1000;

        return erase_timeout;
}

static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
                                         unsigned int arg,
                                         unsigned int qty)
{
        unsigned int erase_timeout;

        if (card->ssr.erase_timeout) {
                /* Erase timeout specified in SD Status Register (SSR) */
                erase_timeout = card->ssr.erase_timeout * qty +
                                card->ssr.erase_offset;
        } else {
                /*
                 * Erase timeout not specified in SD Status Register (SSR) so
                 * use 250ms per write block.
                 */
                erase_timeout = 250 * qty;
        }

        /* Must not be less than 1 second */
        if (erase_timeout < 1000)
                erase_timeout = 1000;

        return erase_timeout;
}
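/*
 * Worked example (illustrative only, hypothetical numbers): an SD card
 * reporting ssr.erase_timeout = 250 ms per AU and ssr.erase_offset =
 * 50 ms gives, for qty = 4 allocation units, 250 * 4 + 50 = 1050 ms.
 * A card reporting no SSR erase info falls back to 250 ms * qty,
 * clamped to at least 1000 ms either way.
 */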
static unsigned int mmc_erase_timeout(struct mmc_card *card,
                                      unsigned int arg,
                                      unsigned int qty)
{
        if (mmc_card_sd(card))
                return mmc_sd_erase_timeout(card, arg, qty);
        else
                return mmc_mmc_erase_timeout(card, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
                        unsigned int to, unsigned int arg)
{
        struct mmc_command cmd = {0};
        unsigned int qty = 0;
        int err;

        /*
         * qty is used to calculate the erase timeout which depends on how many
         * erase groups (or allocation units in SD terminology) are affected.
         * We count erasing part of an erase group as one erase group.
         * For SD, the allocation units are always a power of 2.  For MMC, the
         * erase group size is almost certainly also a power of 2, but the
         * JEDEC standard does not seem to insist on that, so we fall back to
         * division in that case.  SD may not specify an allocation unit size,
         * in which case the timeout is based on the number of write blocks.
         *
         * Note that the timeout for secure trim 2 will only be correct if the
         * number of erase groups specified is the same as the total of all
         * preceding secure trim 1 commands.  Since the power may have been
         * lost since the secure trim 1 commands occurred, it is generally
         * impossible to calculate the secure trim 2 timeout correctly.
         */
        if (card->erase_shift)
                qty += ((to >> card->erase_shift) -
                        (from >> card->erase_shift)) + 1;
        else if (mmc_card_sd(card))
                qty += to - from + 1;
        else
                qty += ((to / card->erase_size) -
                        (from / card->erase_size)) + 1;

        if (!mmc_card_blockaddr(card)) {
                from <<= 9;
                to <<= 9;
        }

        if (mmc_card_sd(card))
                cmd.opcode = SD_ERASE_WR_BLK_START;
        else
                cmd.opcode = MMC_ERASE_GROUP_START;
        cmd.arg = from;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                printk(KERN_ERR "mmc_erase: group start error %d, "
                       "status %#x\n", err, cmd.resp[0]);
                err = -EINVAL;
                goto out;
        }

        memset(&cmd, 0, sizeof(struct mmc_command));
        if (mmc_card_sd(card))
                cmd.opcode = SD_ERASE_WR_BLK_END;
        else
                cmd.opcode = MMC_ERASE_GROUP_END;
        cmd.arg = to;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
                       err, cmd.resp[0]);
                err = -EINVAL;
                goto out;
        }

        memset(&cmd, 0, sizeof(struct mmc_command));
        cmd.opcode = MMC_ERASE;
        cmd.arg = arg;
        cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
        cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
                       err, cmd.resp[0]);
                err = -EIO;
                goto out;
        }

        if (mmc_host_is_spi(card->host))
                goto out;

        do {
                memset(&cmd, 0, sizeof(struct mmc_command));
                cmd.opcode = MMC_SEND_STATUS;
                cmd.arg = card->rca << 16;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
                /* Do not retry else we can't see errors */
                err = mmc_wait_for_cmd(card->host, &cmd, 0);
                if (err || (cmd.resp[0] & 0xFDF92000)) {
                        printk(KERN_ERR "error %d requesting status %#x\n",
                                err, cmd.resp[0]);
                        err = -EIO;
                        goto out;
                }
        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                 R1_CURRENT_STATE(cmd.resp[0]) == 7);
out:
        return err;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
              unsigned int arg)
{
        unsigned int rem, to = from + nr;

        if (!(card->host->caps & MMC_CAP_ERASE) ||
            !(card->csd.cmdclass & CCC_ERASE))
                return -EOPNOTSUPP;

        if (!card->erase_size)
                return -EOPNOTSUPP;

        if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
                return -EOPNOTSUPP;

        if ((arg & MMC_SECURE_ARGS) &&
            !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
                return -EOPNOTSUPP;

        if ((arg & MMC_TRIM_ARGS) &&
            !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
                return -EOPNOTSUPP;

        if (arg == MMC_SECURE_ERASE_ARG) {
                if (from % card->erase_size || nr % card->erase_size)
                        return -EINVAL;
        }

        if (arg == MMC_ERASE_ARG) {
                rem = from % card->erase_size;
                if (rem) {
                        rem = card->erase_size - rem;
                        from += rem;
                        if (nr > rem)
                                nr -= rem;
                        else
                                return 0;
                }
                rem = nr % card->erase_size;
                if (rem)
                        nr -= rem;
        }

        if (nr == 0)
                return 0;

        to = from + nr;

        if (to <= from)
                return -EINVAL;

        /* 'from' and 'to' are inclusive */
        to -= 1;

        return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

int mmc_can_erase(struct mmc_card *card)
{
        if ((card->host->caps & MMC_CAP_ERASE) &&
            (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
        if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
        if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
                            unsigned int nr)
{
        if (!card->erase_size)
                return 0;
        if (from % card->erase_size || nr % card->erase_size)
                return 0;
        return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
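/*
 * Usage sketch (illustrative only, not part of this file): a discard
 * path typically checks capability first and prefers TRIM when the
 * range is not erase-group aligned:
 *
 *      unsigned int arg = MMC_ERASE_ARG;
 *
 *      if (!mmc_can_erase(card))
 *              return -EOPNOTSUPP;
 *      if (mmc_can_trim(card) &&
 *          !mmc_erase_group_aligned(card, from, nr))
 *              arg = MMC_TRIM_ARG;
 *      err = mmc_erase(card, from, nr, arg);
 */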
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
                                            unsigned int arg)
{
        struct mmc_host *host = card->host;
        unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
        unsigned int last_timeout = 0;

        if (card->erase_shift)
                max_qty = UINT_MAX >> card->erase_shift;
        else if (mmc_card_sd(card))
                max_qty = UINT_MAX;
        else
                max_qty = UINT_MAX / card->erase_size;

        /* Find the largest qty with an OK timeout */
        do {
                y = 0;
                for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
                        timeout = mmc_erase_timeout(card, arg, qty + x);
                        if (timeout > host->max_discard_to)
                                break;
                        if (timeout < last_timeout)
                                break;
                        last_timeout = timeout;
                        y = x;
                }
                qty += y;
        } while (y);

        if (!qty)
                return 0;

        if (qty == 1)
                return 1;

        /* Convert qty to sectors */
        if (card->erase_shift)
                max_discard = --qty << card->erase_shift;
        else if (mmc_card_sd(card))
                max_discard = qty;
        else
                max_discard = --qty * card->erase_size;

        return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        unsigned int max_discard, max_trim;

        if (!host->max_discard_to)
                return UINT_MAX;

        /*
         * Without erase_group_def set, MMC erase timeout depends on clock
         * frequency, which can change.  In that case, the best choice is
         * just the preferred erase size.
         */
        if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
                return card->pref_erase;

        max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
        if (mmc_can_trim(card)) {
                max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
                if (max_trim < max_discard)
                        max_discard = max_trim;
        } else if (max_discard < card->erase_size) {
                max_discard = 0;
        }
        pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
                 mmc_hostname(host), max_discard, host->max_discard_to);
        return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
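/*
 * Usage sketch (illustrative only, not part of this file): the block
 * driver can use this to size discard requests so each one fits in
 * the host's maximum busy timeout:
 *
 *      unsigned int max_discard = mmc_calc_max_discard(card);
 *
 *      if (max_discard)
 *              blk_queue_max_discard_sectors(queue, max_discard);
 */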
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
        struct mmc_command cmd = {0};

        if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
                return 0;

        cmd.opcode = MMC_SET_BLOCKLEN;
        cmd.arg = blocklen;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
        host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
        pr_info("%s: %s: trying to init card at %u Hz\n",
                mmc_hostname(host), __func__, host->f_init);
#endif
        mmc_power_up(host);

        /*
         * sdio_reset sends CMD52 to reset card.  Since we do not know
         * if the card is being re-initialized, just send it.  CMD52
         * should be ignored by SD/eMMC cards.
         */
        sdio_reset(host);
        mmc_go_idle(host);

        mmc_send_if_cond(host, host->ocr_avail);

        /* Order's important: probe SDIO, then SD, then MMC */
        if (!mmc_attach_sdio(host))
                return 0;
        if (!mmc_attach_sd(host))
                return 0;
        if (!mmc_attach_mmc(host))
                return 0;

        mmc_power_off(host);
        return -EIO;
}

void mmc_rescan(struct work_struct *work)
{
        static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
        struct mmc_host *host =
                container_of(work, struct mmc_host, detect.work);
        int i;

        if (host->rescan_disable)
                return;

        mmc_bus_get(host);

        /*
         * if there is a _removable_ card registered, check whether it is
         * still present
         */
        if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
            && !(host->caps & MMC_CAP_NONREMOVABLE))
                host->bus_ops->detect(host);

        /*
         * Let mmc_bus_put() free the bus/bus_ops if we've found that
         * the card is no longer present.
         */
        mmc_bus_put(host);
        mmc_bus_get(host);

        /* if there still is a card present, stop here */
        if (host->bus_ops != NULL) {
                mmc_bus_put(host);
                goto out;
        }

        /*
         * Only we can add a new handler, so it's safe to
         * release the lock here.
         */
        mmc_bus_put(host);

        if (host->ops->get_cd && host->ops->get_cd(host) == 0)
                goto out;

        mmc_claim_host(host);
        for (i = 0; i < ARRAY_SIZE(freqs); i++) {
                if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
                        break;
                if (freqs[i] <= host->f_min)
                        break;
        }
        mmc_release_host(host);

out:
        if (host->caps & MMC_CAP_NEEDS_POLL)
                mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
        mmc_power_off(host);
        mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned long flags;
        spin_lock_irqsave(&host->lock, flags);
        host->removed = 1;
        spin_unlock_irqrestore(&host->lock, flags);
#endif

        if (host->caps & MMC_CAP_DISABLE)
                cancel_delayed_work(&host->disable);
        cancel_delayed_work_sync(&host->detect);
        mmc_flush_scheduled_work();

        /* clear pm flags now and let card drivers set them as needed */
        host->pm_flags = 0;

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                if (host->bus_ops->remove)
                        host->bus_ops->remove(host);

                mmc_claim_host(host);
                mmc_detach_bus(host);
                mmc_release_host(host);
                mmc_bus_put(host);
                return;
        }
        mmc_bus_put(host);

        BUG_ON(host->card);

        mmc_power_off(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
        int ret = 0;

#ifdef CONFIG_MMC_DEBUG
        pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

        mmc_bus_get(host);

        if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
                mmc_bus_put(host);
                return -EINVAL;
        }

        if (host->bus_ops->power_save)
                ret = host->bus_ops->power_save(host);

        mmc_bus_put(host);

        mmc_power_off(host);

        return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
        int ret;

#ifdef CONFIG_MMC_DEBUG
        pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

        mmc_bus_get(host);

        if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
                mmc_bus_put(host);
                return -EINVAL;
        }

        mmc_power_up(host);
        ret = host->bus_ops->power_restore(host);

        mmc_bus_put(host);

        return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
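/*
 * Usage sketch (illustrative only, not part of this file): platform
 * code that knows the card keeps its state across a slot power cut
 * can pair these helpers around a low-power interval:
 *
 *      mmc_power_save_host(host);    // card stays registered, power off
 *      // ... system-specific low-power period ...
 *      mmc_power_restore_host(host); // power up and restore card state
 */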
int mmc_card_awake(struct mmc_host *host)
{
        int err = -ENOSYS;

        mmc_bus_get(host);

        if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
                err = host->bus_ops->awake(host);

        mmc_bus_put(host);

        return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
        int err = -ENOSYS;

        mmc_bus_get(host);

        if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
                err = host->bus_ops->sleep(host);

        mmc_bus_put(host);

        return err;
}
EXPORT_SYMBOL(mmc_card_sleep);

int mmc_card_can_sleep(struct mmc_host *host)
{
        struct mmc_card *card = host->card;

        if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);

#ifdef CONFIG_PM

/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
        int err = 0;

        if (host->caps & MMC_CAP_DISABLE)
                cancel_delayed_work(&host->disable);
        cancel_delayed_work(&host->detect);
        mmc_flush_scheduled_work();

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                if (host->bus_ops->suspend)
                        err = host->bus_ops->suspend(host);
                if (err == -ENOSYS || !host->bus_ops->resume) {
                        /*
                         * We simply "remove" the card in this case.
                         * It will be redetected on resume.
                         */
                        if (host->bus_ops->remove)
                                host->bus_ops->remove(host);
                        mmc_claim_host(host);
                        mmc_detach_bus(host);
                        mmc_release_host(host);
                        host->pm_flags = 0;
                        err = 0;
                }
        }
        mmc_bus_put(host);

        if (!err && !mmc_card_keep_power(host))
                mmc_power_off(host);

        return err;
}

EXPORT_SYMBOL(mmc_suspend_host);

/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
        int err = 0;

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                if (!mmc_card_keep_power(host)) {
                        mmc_power_up(host);
                        mmc_select_voltage(host, host->ocr);
                        /*
                         * Tell runtime PM core we just powered up the card,
                         * since it still believes the card is powered off.
                         * Note that currently runtime PM is only enabled
                         * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
                         */
                        if (mmc_card_sdio(host->card) &&
                            (host->caps & MMC_CAP_POWER_OFF_CARD)) {
                                pm_runtime_disable(&host->card->dev);
                                pm_runtime_set_active(&host->card->dev);
                                pm_runtime_enable(&host->card->dev);
                        }
                }
                BUG_ON(!host->bus_ops->resume);
                err = host->bus_ops->resume(host);
                if (err) {
                        printk(KERN_WARNING "%s: error %d during resume "
                                            "(card was removed?)\n",
                                            mmc_hostname(host), err);
                        err = 0;
                }
        }
        host->pm_flags &= ~MMC_PM_KEEP_POWER;
        mmc_bus_put(host);

        return err;
}
EXPORT_SYMBOL(mmc_resume_host);
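/*
 * Usage sketch (illustrative only, not part of this file): a host
 * controller driver's suspend/resume callbacks usually forward to
 * these helpers; the foo_* names are hypothetical.
 *
 *      static int foo_suspend(struct device *dev)
 *      {
 *              struct foo_host *fh = dev_get_drvdata(dev);
 *
 *              return mmc_suspend_host(fh->mmc);
 *      }
 *
 *      static int foo_resume(struct device *dev)
 *      {
 *              struct foo_host *fh = dev_get_drvdata(dev);
 *
 *              return mmc_resume_host(fh->mmc);
 *      }
 */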
/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do it in a pm notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
                  unsigned long mode, void *unused)
{
        struct mmc_host *host = container_of(
                notify_block, struct mmc_host, pm_notify);
        unsigned long flags;

        switch (mode) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:

                spin_lock_irqsave(&host->lock, flags);
                host->rescan_disable = 1;
                spin_unlock_irqrestore(&host->lock, flags);
                cancel_delayed_work_sync(&host->detect);

                if (!host->bus_ops || host->bus_ops->suspend)
                        break;

                mmc_claim_host(host);

                if (host->bus_ops->remove)
                        host->bus_ops->remove(host);

                mmc_detach_bus(host);
                mmc_release_host(host);
                host->pm_flags = 0;
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
        case PM_POST_RESTORE:

                spin_lock_irqsave(&host->lock, flags);
                host->rescan_disable = 0;
                spin_unlock_irqrestore(&host->lock, flags);
                mmc_detect_change(host, 0);

        }

        return 0;
}
#endif

static int __init mmc_init(void)
{
        int ret;

        workqueue = alloc_ordered_workqueue("kmmcd", 0);
        if (!workqueue)
                return -ENOMEM;

        ret = mmc_register_bus();
        if (ret)
                goto destroy_workqueue;

        ret = mmc_register_host_class();
        if (ret)
                goto unregister_bus;

        ret = sdio_register_bus();
        if (ret)
                goto unregister_host_class;

        return 0;

unregister_host_class:
        mmc_unregister_host_class();
unregister_bus:
        mmc_unregister_bus();
destroy_workqueue:
        destroy_workqueue(workqueue);

        return ret;
}

static void __exit mmc_exit(void)
{
        sdio_unregister_bus();
        mmc_unregister_host_class();
        mmc_unregister_bus();
        destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");