/*
 * linux/drivers/mmc/core/core.c
 *
 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "pwrseq.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */

/* The max erase timeout, used when host->max_busy_timeout isn't specified */
#define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */

static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	/*
	 * We use the system_freezable_wq, because of two reasons.
	 * First, it allows several works (not the same work item) to be
	 * executed simultaneously. Second, the queue becomes frozen when
	 * userspace becomes frozen during system PM.
	 */
	return queue_delayed_work(system_freezable_wq, work, delay);
}

#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_complete_cmd(struct mmc_request *mrq)
{
	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
		complete_all(&mrq->cmd_completion);
}

void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
{
	if (!mrq->cap_cmd_during_tfr)
		return;

	mmc_complete_cmd(mrq);

	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
		 mmc_hostname(host), mrq->cmd->opcode);
}
EXPORT_SYMBOL(mmc_command_done);

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	/* Flag re-tuning needed on CRC errors */
	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
	    (mrq->data && mrq->data->error == -EILSEQ) ||
	    (mrq->stop && mrq->stop->error == -EILSEQ)))
		mmc_retune_needed(host);

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (host->ongoing_mrq == mrq)
		host->ongoing_mrq = NULL;

	mmc_complete_cmd(mrq);

	trace_mmc_request_done(host, mrq);

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		if (!host->ongoing_mrq)
			led_trigger_event(host->led, LED_OFF);

		if (mrq->sbc) {
			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->sbc->opcode,
				mrq->sbc->error,
				mrq->sbc->resp[0], mrq->sbc->resp[1],
				mrq->sbc->resp[2], mrq->sbc->resp[3]);
		}

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);
	}
}

EXPORT_SYMBOL(mmc_request_done);

static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/* Assumes host controller has been runtime resumed by mmc_claim_host */
	err = mmc_retune(host);
	if (err) {
		mrq->cmd->error = err;
		mmc_request_done(host, mrq);
		return;
	}

	/*
	 * For sdio rw commands we must wait for card busy otherwise some
	 * sdio devices won't work properly.
	 */
	if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
		int tries = 500; /* Wait approximately 500ms at maximum */

		while (host->ops->card_busy(host) && --tries)
			mmc_delay(1);

		if (tries == 0) {
			mrq->cmd->error = -EBUSY;
			mmc_request_done(host, mrq);
			return;
		}
	}

	if (mrq->cap_cmd_during_tfr) {
		host->ongoing_mrq = mrq;
		/*
		 * Retry path could come through here without having waited on
		 * cmd_completion, so ensure it is reinitialised.
		 */
		reinit_completion(&mrq->cmd_completion);
	}

	trace_mmc_request_start(host, mrq);

	host->ops->request(host, mrq);
}

static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif
	mmc_retune_hold(host);

	if (mmc_card_removed(host->card))
		return -ENOMEDIUM;

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->sbc) {
		mrq->sbc->error = 0;
		mrq->sbc->mrq = mrq;
	}
	if (mrq->data) {
		if (mrq->data->blksz > host->max_blk_size ||
		    mrq->data->blocks > host->max_blk_count ||
		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
			return -EINVAL;
#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		if (sz != mrq->data->blocks * mrq->data->blksz)
			return -EINVAL;
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	led_trigger_event(host->led, LED_FULL);
	__mmc_start_request(host, mrq);

	return 0;
}

/**
 * mmc_start_bkops - start BKOPS for supported cards
 * @card: MMC card to start BKOPS
 * @from_exception: A flag to indicate if this function was
 *		    called due to an exception raised by the card
 *
 * Start background operations whenever requested.
 * When the urgent BKOPS bit is set in a R1 command response
 * then background operations should be started immediately.
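 *
 * Illustrative call (not part of the original documentation): a caller
 * that sees R1_EXCEPTION_EVENT in an R1 response may invoke
 * mmc_start_bkops(card, true); only urgent BKOPS status (level 2 or
 * higher) is then acted upon, and it is handled synchronously.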
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_BKOPS_MAX_TIMEOUT;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	mmc_retune_hold(card->host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout, 0,
			use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		mmc_retune_release(card->host);
		goto out;
	}

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher) BKOPS is executed
	 * synchronously; otherwise the operation is still in progress when
	 * we return.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
	else
		mmc_retune_release(card->host);
out:
	mmc_release_host(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);

/*
 * mmc_wait_data_done() - done callback for data request
 * @mrq: done data request
 *
 * Wakes up mmc context, passed as a callback to host controller driver
 */
static void mmc_wait_data_done(struct mmc_request *mrq)
{
	struct mmc_context_info *context_info = &mrq->host->context_info;

	context_info->is_done_rcv = true;
	wake_up_interruptible(&context_info->wait);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
{
	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);

	/*
	 * If there is an ongoing transfer, wait for the command line to become
	 * available.
	 */
	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
		wait_for_completion(&ongoing_mrq->cmd_completion);
}

/*
 * __mmc_start_data_req() - starts data request
 * @host: MMC host to start the request
 * @mrq: data request to start
 *
 * Sets the done callback to be called when the request is completed by the
 * card. Starts data mmc request execution.
 * If an ongoing transfer is already in progress, wait for the command line
 * to become available before sending another command.
 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_wait_ongoing_tfr_cmd(host);

	mrq->done = mmc_wait_data_done;
	mrq->host = host;

	init_completion(&mrq->cmd_completion);

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_complete_cmd(mrq);
		mmc_wait_data_done(mrq);
	}

	return err;
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_wait_ongoing_tfr_cmd(host);

	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;

	init_completion(&mrq->cmd_completion);

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_complete_cmd(mrq);
		complete(&mrq->completion);
	}

	return err;
}

/*
 * mmc_wait_for_data_req_done() - wait for request completed
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 *
 * Blocks the MMC context until the host controller acks the end of data
 * request execution or a new request notification arrives from the block
 * layer. Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
						      struct mmc_request *mrq)
{
	struct mmc_command *cmd;
	struct mmc_context_info *context_info = &host->context_info;
	enum mmc_blk_status status;

	while (1) {
		wait_event_interruptible(context_info->wait,
				(context_info->is_done_rcv ||
				 context_info->is_new_req));

		if (context_info->is_done_rcv) {
			context_info->is_done_rcv = false;
			cmd = mrq->cmd;

			if (!cmd->error || !cmd->retries ||
			    mmc_card_removed(host->card)) {
				status = host->areq->err_check(host->card,
							       host->areq);
				break; /* return status */
			} else {
				mmc_retune_recheck(host);
				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
					mmc_hostname(host),
					cmd->opcode, cmd->error);
				cmd->retries--;
				cmd->error = 0;
				__mmc_start_request(host, mrq);
				continue; /* wait for done/new event again */
			}
		}

		return MMC_BLK_NEW_REQUEST;
	}
	mmc_retune_release(host);
	return status;
}

void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;

		/*
		 * If the host has timed out waiting for the sanitize
		 * to complete, the card might still be in the programming
		 * state, so let's try to bring it out of that
		 * state.
		 */
		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
			if (!mmc_interrupt_hpi(host->card)) {
				pr_warn("%s: %s: Interrupted sanitize\n",
					mmc_hostname(host), __func__);
				cmd->error = 0;
				break;
			} else {
				pr_err("%s: %s: Failed to interrupt sanitize\n",
				       mmc_hostname(host), __func__);
			}
		}
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		mmc_retune_recheck(host);

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		__mmc_start_request(host, mrq);
	}

	mmc_retune_release(host);
}
EXPORT_SYMBOL(mmc_wait_for_req_done);

/**
 * mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 * @host: MMC host
 * @mrq: MMC request
 *
 * mmc_is_req_done() is used with requests that have
 * mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
 * starting a request and before waiting for it to complete. That is,
 * either in between calls to mmc_start_req(), or after mmc_wait_for_req()
 * and before mmc_wait_for_req_done(). If it is called at other times the
 * result is not meaningful.
 */
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	if (host->areq)
		return host->context_info.is_done_rcv;
	else
		return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);

/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let the
 * host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
{
	if (host->ops->pre_req)
		host->ops->pre_req(host, mrq);
}

/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: Error, if non zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req)
		host->ops->post_req(host, mrq, err);
}

/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @ret_stat: out parameter for the request status
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for completion
 * of that request, then start the new one and return.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, or NULL in case none completed.
 * Wait for an ongoing request (previously started) to complete and
 * return the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
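 *
 * A minimal usage sketch (illustrative only; 'prev', 'next_areq' and
 * 'status' are caller-defined, error handling elided):
 *
 *	prev = mmc_start_req(host, next_areq, &status);
 *	(process 'prev' here - it is the request started on the previous
 *	call - then prepare the next async request)
 *	prev = mmc_start_req(host, NULL, &status);
 *
 * The final call with a NULL @areq only waits for, and returns, the last
 * started request.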
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq,
				    enum mmc_blk_status *ret_stat)
{
	enum mmc_blk_status status = MMC_BLK_SUCCESS;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq);

	if (host->areq) {
		status = mmc_wait_for_data_req_done(host, host->areq->mrq);
		if (status == MMC_BLK_NEW_REQUEST) {
			if (ret_stat)
				*ret_stat = status;
			/*
			 * The previous request was not completed,
			 * nothing to return
			 */
			return NULL;
		}
		/*
		 * Check BKOPS urgency for each R1 response
		 */
		if (host->card && mmc_card_mmc(host->card) &&
		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {

			/* Cancel the prepared request */
			if (areq)
				mmc_post_req(host, areq->mrq, -EINVAL);

			mmc_start_bkops(host->card, true);

			/* Prepare the request again */
			if (areq)
				mmc_pre_req(host, areq->mrq);
		}
	}

	if (status == MMC_BLK_SUCCESS && areq)
		start_err = __mmc_start_data_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((status != MMC_BLK_SUCCESS || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (status != MMC_BLK_SUCCESS)
		host->areq = NULL;
	else
		host->areq = areq;

	if (ret_stat)
		*ret_stat = status;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. In the case of 'cap_cmd_during_tfr'
 * requests, the transfer is ongoing and the caller can issue further
 * commands that do not use the data lines, and then wait by calling
 * mmc_wait_for_req_done().
 * Does not attempt to parse the response.
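 *
 * A minimal sketch (illustrative only, mirroring what mmc_wait_for_cmd()
 * below does internally; error handling elided):
 *
 *	struct mmc_request mrq = {NULL};
 *	struct mmc_command cmd = {0};
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	mrq.cmd = &cmd;
 *	mmc_wait_for_req(host, &mrq);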
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);

	if (!mrq->cap_cmd_during_tfr)
		mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt (HPI)
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt, then poll the card status
 * until it leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);

/**
 * mmc_stop_bkops - stop ongoing BKOPS
 * @card: MMC card to check BKOPS
 *
 * Send HPI command to stop ongoing background operations to
 * allow rapid servicing of foreground operations, e.g. read/
 * writes. Wait until the card comes out of the programming state
 * to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL, we can't issue an HPI;
	 * the card will complete the BKOPS on its own.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		mmc_retune_release(card->host);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);

int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	mmc_claim_host(card->host);
	err = mmc_get_ext_csd(card, &ext_csd);
	mmc_release_host(card->host);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}
EXPORT_SYMBOL(mmc_read_bkops_status);

/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
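	 * For example (illustrative): a card reporting r2w_factor = 2
	 * turns the SD multiplier of 100 into 100 << 2 = 400 for writes.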
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (card->host->ios.clock)
			timeout_us += data->timeout_clks * 1000 /
				(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length." Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}

		/* assign limit value if invalid */
		if (timeout_us == 0)
			data->timeout_ns = limit_us * 1000;
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 600ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 600000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);

/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value then this will return prematurely with
 * that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
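 *
 * Most callers use the mmc_claim_host() wrapper, which passes a NULL
 * @abort. A minimal sketch (illustrative only, error handling elided):
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_send_status(card, &status);
 *	mmc_release_host(card->host);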
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;
	bool pm = false;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		if (host->claim_cnt == 1)
			pm = true;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);

	if (pm)
		pm_runtime_get_sync(mmc_dev(host));

	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release a MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
		pm_runtime_mark_last_busy(mmc_dev(host));
		pm_runtime_put_autosuspend(mmc_dev(host));
	}
}
EXPORT_SYMBOL(mmc_release_host);

/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card)
{
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
}
EXPORT_SYMBOL(mmc_get_card);

/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card)
{
	mmc_release_host(card->host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 1 << ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz && hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

int mmc_execute_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 opcode;
	int err;

	if (!host->ops->execute_tuning)
		return 0;

	if (mmc_card_mmc(card))
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	else
		opcode = MMC_SEND_TUNING_BLOCK;

	err = host->ops->execute_tuning(host, opcode);

	if (err)
		pr_err("%s: tuning execution failed: %d\n",
			mmc_hostname(host), err);
	else
		mmc_retune_enable(host);

	return err;
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}

/*
 * Set initial state after a power cycle or a hw_reset.
 */
void mmc_set_initial_state(struct mmc_host *host)
{
	mmc_retune_disable(host);

	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	host->ios.drv_type = 0;
	host->ios.enhanced_strobe = false;

	/*
	 * Make sure we are in non-enhanced strobe mode before we
	 * actually enable it in ext_csd.
	 */
	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
	     host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);

	mmc_set_ios(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);

#ifdef CONFIG_OF

/**
 * mmc_of_parse_voltage - return mask of supported voltages
 * @np: The device node to be parsed.
 * @mask: mask of voltages available for MMC/SD/SDIO
 *
 * Parse the "voltage-ranges" DT property, returning zero if it is not
 * found, negative errno if the voltage-range specification is invalid,
 * or one if the voltage-range is specified and successfully parsed.
 */
int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
{
	const u32 *voltage_ranges;
	int num_ranges, i;

	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
	if (!voltage_ranges) {
		pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
		return 0;
	}
	if (!num_ranges) {
		pr_err("%s: voltage-ranges empty\n", np->full_name);
		return -EINVAL;
	}

	for (i = 0; i < num_ranges; i++) {
		const int j = i * 2;
		u32 ocr_mask;

		ocr_mask = mmc_vddrange_to_ocrmask(
				be32_to_cpu(voltage_ranges[j]),
				be32_to_cpu(voltage_ranges[j + 1]));
		if (!ocr_mask) {
			pr_err("%s: voltage-range #%d is invalid\n",
				np->full_name, i);
			return -EINVAL;
		}
		*mask |= ocr_mask;
	}

	return 1;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);

#endif /* CONFIG_OF */

static int mmc_of_get_func_num(struct device_node *node)
{
	u32 reg;
	int ret;

	ret = of_property_read_u32(node, "reg", &reg);
	if (ret < 0)
		return ret;

	return reg;
}

struct device_node *mmc_of_find_child_device(struct mmc_host *host,
		unsigned func_num)
{
	struct device_node *node;

	if (!host->parent || !host->parent->of_node)
		return NULL;

	for_each_child_of_node(host->parent->of_node, node) {
		if (mmc_of_get_func_num(node) == func_num)
			return node;
	}

	return NULL;
}

#ifdef CONFIG_REGULATOR

/**
 * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage
 * @vdd_bit: OCR bit number
 * @min_uV: minimum voltage value (uV)
 * @max_uV: maximum voltage value (uV)
 *
 * This function returns the voltage range according to the provided OCR
 * bit number. If conversion is not possible a negative errno value is
 * returned.
 */
static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
{
	int tmp;

	if (!vdd_bit)
		return -EINVAL;

	/*
	 * REVISIT mmc_vddrange_to_ocrmask() may have set some
	 * bits this regulator doesn't quite support ...
	 * don't be too picky, most cards and regulators are OK with
	 * a 0.1V range goof (it's a small error percentage).
	 */
	tmp = vdd_bit - ilog2(MMC_VDD_165_195);
	if (tmp == 0) {
		*min_uV = 1650 * 1000;
		*max_uV = 1950 * 1000;
	} else {
		*min_uV = 1900 * 1000 + tmp * 100 * 1000;
		*max_uV = *min_uV + 100 * 1000;
	}

	return 0;
}

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;
	int vdd_uV;
	int vdd_mV;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	if (!result) {
		vdd_uV = regulator_get_voltage(supply);
		if (vdd_uV <= 0)
			return vdd_uV;

		vdd_mV = vdd_uV / 1000;
		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);

		result = regulator_set_voltage(supply, min_uV, max_uV);
		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);

static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
						  int min_uV, int target_uV,
						  int max_uV)
{
	/*
	 * Check if supported first to avoid errors since we may try several
	 * signal levels during power up and don't want to show errors.
	 */
	if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
		return -EINVAL;

	return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
					     max_uV);
}

/**
 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
 *
 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
 * That will match the behavior of old boards where VQMMC and VMMC were
 * supplied by the same supply.
 * The Bus Operating conditions for 3.3V signaling in the
 * SD card spec also define VQMMC in terms of VMMC.
 * If this is not possible we'll try the full 2.7-3.6V of the spec.
 *
 * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
 * requested voltage. This is definitely a good idea for UHS where there's a
 * separate regulator on the card that's trying to make 1.8V and it's best if
 * we match.
 *
 * This function is expected to be used by a controller's
 * start_signal_voltage_switch() function.
 */
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct device *dev = mmc_dev(mmc);
	int ret, volt, min_uV, max_uV;

	/* If no vqmmc supply then we can't change the voltage */
	if (IS_ERR(mmc->supply.vqmmc))
		return -EINVAL;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_120:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1100000, 1200000, 1300000);
	case MMC_SIGNAL_VOLTAGE_180:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1700000, 1800000, 1950000);
	case MMC_SIGNAL_VOLTAGE_330:
		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
		if (ret < 0)
			return ret;

		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
			__func__, volt, max_uV);

		min_uV = max(volt - 300000, 2700000);
		max_uV = min(max_uV + 200000, 3600000);

		/*
		 * Due to a limitation in the current implementation of
		 * regulator_set_voltage_triplet() which is taking the lowest
		 * voltage possible if below the target, search for a suitable
		 * voltage in two steps and try to stay close to vmmc
		 * with a 0.3V tolerance at first.
		 */
		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						min_uV, volt, max_uV))
			return 0;

		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						2700000, volt, 3600000);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);

#endif /* CONFIG_REGULATOR */

int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	int ret;

	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");

	if (IS_ERR(mmc->supply.vmmc)) {
		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "No vmmc regulator found\n");
	} else {
		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
		if (ret > 0)
			mmc->ocr_avail = ret;
		else
			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
	}

	if (IS_ERR(mmc->supply.vqmmc)) {
		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "No vqmmc regulator found\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		dev_warn(mmc_dev(host),
		"card claims to support voltages below defined range\n");
		ocr &= ~0x7F;
	}

	ocr &= host->ocr_avail;
	if (!ocr) {
		dev_warn(mmc_dev(host), "no support for card's volts\n");
		return 0;
	}

	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
		bit = ffs(ocr) - 1;
		ocr &= 3 << bit;
		mmc_power_cycle(host, ocr);
	} else {
		bit = fls(ocr) - 1;
		ocr &= 3 << bit;
		if (bit != host->ios.vdd)
			dev_warn(mmc_dev(host), "exceeding card's volts\n");
	}

	return ocr;
}

int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;

	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch)
		err = host->ops->start_signal_voltage_switch(host, &host->ios);

	if (err)
		host->ios.signal_voltage = old_signal_voltage;

	return err;

}

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
{
	struct mmc_command cmd = {0};
	int err = 0;
	u32 clock;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return __mmc_set_signal_voltage(host, signal_voltage);

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warn("%s: cannot verify signal voltage switch\n",
			mmc_hostname(host));

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}
	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (__mmc_set_signal_voltage(host, signal_voltage)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mmc_delay(10);
	host->ios.clock = clock;
	mmc_set_ios(host);

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			"power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host, ocr);
	}

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
}

int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
			      int card_drv_type, int *drv_type)
{
	struct mmc_host *host = card->host;
	int host_drv_type = SD_DRIVER_TYPE_B;

	*drv_type = 0;

	if (!host->ops->select_drive_strength)
		return 0;

	/* Use SD definition of driver strength for hosts */
	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
		host_drv_type |= SD_DRIVER_TYPE_A;

	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
		host_drv_type |= SD_DRIVER_TYPE_C;

	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
		host_drv_type |= SD_DRIVER_TYPE_D;

	/*
	 * The drive strength that the hardware can support
	 * depends on the board design. Pass the appropriate
	 * information and let the hardware specific code
	 * return what is possible given the options
	 */
	return host->ops->select_drive_strength(card, max_dtr,
						host_drv_type,
						card_drv_type,
						drv_type);
}

/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_pwrseq_pre_power_on(host);

	host->ios.vdd = fls(ocr) - 1;
	host->ios.power_mode = MMC_POWER_UP;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
	if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	mmc_pwrseq_post_power_on(host);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
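	 *
	 * For example (illustrative): at the slowest 100 kHz initialization
	 * frequency in freqs[], 74 clock cycles take only 740 us, so the
	 * 10 ms delay below leaves ample margin.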
	 */
	mmc_delay(10);
}

void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_pwrseq_power_off(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	host->ios.power_mode = MMC_POWER_OFF;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);
}

void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host, ocr);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	WARN_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->bus_ops);
	WARN_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}

static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
				bool cd_irq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	/*
	 * If the device is configured as wakeup, we prevent a new sleep for
	 * 5 s to give provision for user space to consume the event.
	 */
	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
		device_can_wakeup(mmc_dev(host)))
		pm_wakeup_event(mmc_dev(host), 5000);

	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
	_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card. That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.
	 * For MMC, have a stab at a good value and for modern cards it will
	 * end up being 4MiB. Note that if the value is too small, it can end
	 * up taking longer to erase. Also note, erase_size is already set to
	 * High Capacity Erase Size if available when this function is called.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->erase_size) {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	} else
		card->pref_erase = 0;
}

static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
				          unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target. The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by
		 * 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (card->host->ios.clock / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}
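
/*
 * Worked example (illustration only) of mmc_sd_erase_timeout(): for an
 * SD card whose SSR does not specify an erase timeout, erasing qty = 2
 * write blocks yields 250 ms * 2 = 500 ms, which is then raised to the
 * 1000 ms floor.  With ssr.erase_timeout = 250 and ssr.erase_offset = 50,
 * the same qty gives 250 * 2 + 50 = 550 ms, again clamped to 1000 ms.
 */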
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0, busy_timeout = 0;
	bool use_r1b_resp = false;
	unsigned long timeout;
	int err;

	mmc_retune_hold(card->host);

	/*
	 * qty is used to calculate the erase timeout which depends on how
	 * many erase groups (or allocation units in SD terminology) are
	 * affected.  We count erasing part of an erase group as one erase
	 * group.  For SD, the allocation units are always a power of 2.
	 * For MMC, the erase group size is almost certainly also a power
	 * of 2, but the JEDEC standard does not seem to insist on that, so
	 * we fall back to division in that case.  SD may not specify an
	 * allocation unit size, in which case the timeout is based on the
	 * number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if
	 * the number of erase groups specified is the same as the total of
	 * all preceding secure trim 1 commands.  Since the power may have
	 * been lost since the secure trim 1 commands occurred, it is
	 * generally impossible to calculate the secure trim 2 timeout
	 * correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	busy_timeout = mmc_erase_timeout(card, arg, qty);
	/*
	 * If the host controller supports busy signalling and the timeout
	 * for the erase operation does not exceed the max_busy_timeout, we
	 * should use an R1B response.  Otherwise we need to prevent the
	 * host from doing hw busy detection, which is done by converting
	 * to an R1 response instead.
	 */
	if (card->host->max_busy_timeout &&
	    busy_timeout > card->host->max_busy_timeout) {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = busy_timeout;
		use_r1b_resp = true;
	}

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	/*
	 * When R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling below
	 * shall be avoided.
	 */
	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		goto out;

	timeout = jiffies + msecs_to_jiffies(busy_timeout);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}

		/*
		 * Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
			       mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}

	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
	mmc_retune_release(card->host);
	return err;
}
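
/*
 * Illustrative sketch (not part of the original driver) of the busy-poll
 * used above, extracted as a stand-alone helper: issue CMD13 until the
 * card reports READY_FOR_DATA and has left the programming state, or the
 * timeout expires.  Error bits are checked against the same 0xFDF92000
 * mask as in mmc_do_erase().
 */
static int __maybe_unused mmc_example_poll_busy(struct mmc_card *card,
						unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	struct mmc_command cmd;
	int err;

	do {
		memset(&cmd, 0, sizeof(cmd));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000))
			return -EIO;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));

	return 0;
}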
%s\n", 2256 mmc_hostname(card->host), __func__); 2257 err = -EIO; 2258 goto out; 2259 } 2260 2261 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || 2262 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG)); 2263 out: 2264 mmc_retune_release(card->host); 2265 return err; 2266 } 2267 2268 static unsigned int mmc_align_erase_size(struct mmc_card *card, 2269 unsigned int *from, 2270 unsigned int *to, 2271 unsigned int nr) 2272 { 2273 unsigned int from_new = *from, nr_new = nr, rem; 2274 2275 /* 2276 * When the 'card->erase_size' is power of 2, we can use round_up/down() 2277 * to align the erase size efficiently. 2278 */ 2279 if (is_power_of_2(card->erase_size)) { 2280 unsigned int temp = from_new; 2281 2282 from_new = round_up(temp, card->erase_size); 2283 rem = from_new - temp; 2284 2285 if (nr_new > rem) 2286 nr_new -= rem; 2287 else 2288 return 0; 2289 2290 nr_new = round_down(nr_new, card->erase_size); 2291 } else { 2292 rem = from_new % card->erase_size; 2293 if (rem) { 2294 rem = card->erase_size - rem; 2295 from_new += rem; 2296 if (nr_new > rem) 2297 nr_new -= rem; 2298 else 2299 return 0; 2300 } 2301 2302 rem = nr_new % card->erase_size; 2303 if (rem) 2304 nr_new -= rem; 2305 } 2306 2307 if (nr_new == 0) 2308 return 0; 2309 2310 *to = from_new + nr_new; 2311 *from = from_new; 2312 2313 return nr_new; 2314 } 2315 2316 /** 2317 * mmc_erase - erase sectors. 2318 * @card: card to erase 2319 * @from: first sector to erase 2320 * @nr: number of sectors to erase 2321 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG) 2322 * 2323 * Caller must claim host before calling this function. 2324 */ 2325 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, 2326 unsigned int arg) 2327 { 2328 unsigned int rem, to = from + nr; 2329 int err; 2330 2331 if (!(card->host->caps & MMC_CAP_ERASE) || 2332 !(card->csd.cmdclass & CCC_ERASE)) 2333 return -EOPNOTSUPP; 2334 2335 if (!card->erase_size) 2336 return -EOPNOTSUPP; 2337 2338 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG) 2339 return -EOPNOTSUPP; 2340 2341 if ((arg & MMC_SECURE_ARGS) && 2342 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)) 2343 return -EOPNOTSUPP; 2344 2345 if ((arg & MMC_TRIM_ARGS) && 2346 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)) 2347 return -EOPNOTSUPP; 2348 2349 if (arg == MMC_SECURE_ERASE_ARG) { 2350 if (from % card->erase_size || nr % card->erase_size) 2351 return -EINVAL; 2352 } 2353 2354 if (arg == MMC_ERASE_ARG) 2355 nr = mmc_align_erase_size(card, &from, &to, nr); 2356 2357 if (nr == 0) 2358 return 0; 2359 2360 if (to <= from) 2361 return -EINVAL; 2362 2363 /* 'from' and 'to' are inclusive */ 2364 to -= 1; 2365 2366 /* 2367 * Special case where only one erase-group fits in the timeout budget: 2368 * If the region crosses an erase-group boundary on this particular 2369 * case, we will be trimming more than one erase-group which, does not 2370 * fit in the timeout budget of the controller, so we need to split it 2371 * and call mmc_do_erase() twice if necessary. This special case is 2372 * identified by the card->eg_boundary flag. 
int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
	unsigned int last_timeout = 0;
	unsigned int max_busy_timeout = host->max_busy_timeout ?
			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;

	if (card->erase_shift) {
		max_qty = UINT_MAX >> card->erase_shift;
		min_qty = card->pref_erase >> card->erase_shift;
	} else if (mmc_card_sd(card)) {
		max_qty = UINT_MAX;
		min_qty = card->pref_erase;
	} else {
		max_qty = UINT_MAX / card->erase_size;
		min_qty = card->pref_erase / card->erase_size;
	}

	/*
	 * We should not use 'host->max_busy_timeout' as the only limit when
	 * deciding the max discard sectors.  We need a balanced value that
	 * improves the erase speed without producing an excessively long
	 * timeout.
	 *
	 * Here we set 'card->pref_erase' as the minimal discard sectors no
	 * matter the size of 'host->max_busy_timeout', but if
	 * 'host->max_busy_timeout' is large enough for more discard sectors,
	 * then we can continue to increase the max discard sectors until we
	 * get a balanced value.  In cases when 'host->max_busy_timeout'
	 * isn't specified, use the default max erase timeout.
	 */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);

			if (qty + x > min_qty && timeout > max_busy_timeout)
				break;

			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	/*
	 * When specifying a sector range to trim, chances are we might cross
	 * an erase-group boundary even if the amount of sectors is less than
	 * one erase-group.
	 * If we can only fit one erase-group in the controller timeout
	 * budget, we have to care that erase-group boundaries are not
	 * crossed by a single trim operation.  We flag that special case
	 * with "eg_boundary".  In all other cases we can just decrement qty
	 * and pretend that we always touch (qty + 1) erase-groups as a
	 * simple optimization.
	 */
	if (qty == 1)
		card->eg_boundary = 1;
	else
		qty--;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty + 1;
	else
		max_discard = qty * card->erase_size;

	return max_discard;
}
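
/*
 * Worked example (illustration only) of the doubling loop above: with
 * min_qty = 8 and a max_busy_timeout first exceeded somewhere between 16
 * and 32 erase-groups, the inner loop accepts x = 1, 2, 4, 8, 16 and
 * rejects x = 32, so y = 16 and qty becomes 16.  A second pass restarts
 * at x = 1 and finds that 17 groups already exceed the budget, so y stays
 * 0 and the loop ends with qty = 16.  Since qty != 1, it is decremented
 * to 15 before being converted to sectors, per the (qty + 1) optimization
 * described above.
 */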
unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change.  In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_busy_timeout ?
		 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
	    mmc_card_hs400(card) || mmc_card_hs400es(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);
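
/*
 * Usage sketch (illustration only): a block driver would typically feed
 * the value computed above into the request queue's discard limit, e.g.:
 *
 *	unsigned int max_discard = mmc_calc_max_discard(card);
 *
 *	if (max_discard)
 *		blk_queue_max_discard_sectors(queue, max_discard);
 *
 * Whether a given driver uses exactly this block-layer helper is an
 * assumption of the sketch, not something this file mandates.
 */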
int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
		       bool is_rel_write)
{
	struct mmc_command cmd = {0};

	cmd.opcode = MMC_SET_BLOCK_COUNT;
	cmd.arg = blockcount & 0x0000FFFF;
	if (is_rel_write)
		cmd.arg |= 1 << 31;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);

static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	host->ops->hw_reset(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	int ret;

	if (!host->card)
		return -EINVAL;

	mmc_bus_get(host);
	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
		mmc_bus_put(host);
		return -EOPNOTSUPP;
	}

	ret = host->bus_ops->reset(host);
	mmc_bus_put(host);

	if (ret)
		pr_warn("%s: tried to reset card, got error %d\n",
			mmc_hostname(host), ret);

	return ret;
}
EXPORT_SYMBOL(mmc_hw_reset);

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host, host->ocr_avail);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up,
	 * so do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset sends CMD52 to reset card.  Since we do not know
	 * if the card is being re-initialized, just send it.  CMD52
	 * should be ignored by SD/eMMC cards.
	 * Skip it if we already know that we do not support SDIO commands.
	 */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		sdio_reset(host);

	mmc_go_idle(host);

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		if (!mmc_attach_sdio(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		if (!mmc_attach_sd(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_MMC))
		if (!mmc_attach_mmc(host))
			return 0;

	mmc_power_off(host);
	return -EIO;
}
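
/*
 * Worked example (illustration only) of the probe order driven by the
 * freqs[] table {400000, 300000, 200000, 100000}: with host->f_min =
 * 250000, the rescan loop in mmc_rescan() below tries
 * max(400000, 250000) = 400 kHz, then max(300000, 250000) = 300 kHz,
 * then max(200000, 250000) = 250 kHz and stops there, since
 * freqs[2] <= f_min ends the loop.
 */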
int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);

	/*
	 * Card detect status and alive check may be out of sync if card is
	 * removed slowly, when card detect switch changes while card/slot
	 * pads are still contacted in hardware (refer to "SD Card Mechanical
	 * Addendum, Appendix C: Card Detection Switch").  So reschedule a
	 * detect work 200ms later for this case.
	 */
	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
		mmc_detect_change(host, msecs_to_jiffies(200));
		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
	}

	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	if (!mmc_card_is_removable(host))
		return 0;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			_mmc_detect_change(host, 0, false);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);

void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	/* If there is a non-removable card registered, only scan once */
	if (!mmc_card_is_removable(host) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	if (host->trigger_card_event && host->ops->card_event) {
		mmc_claim_host(host);
		host->ops->card_event(host);
		mmc_release_host(host);
		host->trigger_card_event = false;
	}

	mmc_bus_get(host);

	/*
	 * If there is a _removable_ card registered, check whether it is
	 * still present.
	 */
	if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* If there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	mmc_claim_host(host);
	if (mmc_card_is_removable(host) && host->ops->get_cd &&
	    host->ops->get_cd(host) == 0) {
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
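
/*
 * Usage sketch (illustration only): a host controller driver with its own
 * card-detect interrupt would typically call mmc_detect_change() from the
 * ISR with a small debounce delay; mmc_rescan() above then runs from the
 * workqueue.  The handler name and the 200 ms debounce are assumptions of
 * the sketch, not part of this file.
 */
static irqreturn_t __maybe_unused mmc_example_cd_irq(int irq, void *dev_id)
{
	struct mmc_host *host = dev_id;

	mmc_detect_change(host, msecs_to_jiffies(200));
	return IRQ_HANDLED;
}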
void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	host->ios.power_mode = MMC_POWER_UNDEFINED;

	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
		mmc_claim_host(host);
		mmc_power_up(host, host->ocr_avail);
		mmc_release_host(host);
	}

	mmc_gpiod_request_cd_irq(host);
	_mmc_detect_change(host, 0, false);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	if (host->slot.cd_irq >= 0)
		disable_irq(host->slot.cd_irq);

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	mmc_claim_host(host);
	mmc_power_off(host);
	mmc_release_host(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host, host->card->ocr);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
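
/*
 * Usage sketch (illustration only): a caller such as the block driver
 * would invoke mmc_flush_cache() when servicing a flush request, so dirty
 * data in the device cache reaches non-volatile storage before the
 * request is completed.  The surrounding claim/release is the caller's
 * job; mmc_flush_cache() itself is a no-op unless the eMMC cache is
 * present and enabled.
 */
static int __maybe_unused mmc_example_flush(struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);
	err = mmc_flush_cache(card);
	mmc_release_host(card->host);

	return err;
}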
#ifdef CONFIG_PM_SLEEP
/*
 * Do the card removal on suspend if the card is assumed removable.  Do
 * that in the pm notifier while user space isn't yet frozen, so we will
 * be able to sync the card.
 */
static int mmc_pm_notify(struct notifier_block *notify_block,
			 unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops)
			break;

		/* Validate prerequisites for suspend */
		if (host->bus_ops->pre_suspend)
			err = host->bus_ops->pre_suspend(host);
		if (!err)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		_mmc_detect_change(host, 0, false);
		break;
	}

	return 0;
}

void mmc_register_pm_notifier(struct mmc_host *host)
{
	host->pm_notify.notifier_call = mmc_pm_notify;
	register_pm_notifier(&host->pm_notify);
}

void mmc_unregister_pm_notifier(struct mmc_host *host)
{
	unregister_pm_notifier(&host->pm_notify);
}
#endif

/**
 * mmc_init_context_info() - init synchronization context
 * @host: mmc host
 *
 * Init struct context_info needed to implement asynchronous
 * request mechanism, used by mmc core, host driver and mmc requests
 * supplier.
 */
void mmc_init_context_info(struct mmc_host *host)
{
	host->context_info.is_new_req = false;
	host->context_info.is_done_rcv = false;
	host->context_info.is_waiting_last_req = false;
	init_waitqueue_head(&host->context_info.wait);
}

static int __init mmc_init(void)
{
	int ret;

	ret = mmc_register_bus();
	if (ret)
		return ret;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");