/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "pwrseq.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */

static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	/*
	 * We use the system_freezable_wq, because of two reasons.
	 * First, it allows several works (not the same work item) to be
	 * executed simultaneously. Second, the queue becomes frozen when
	 * userspace becomes frozen during system PM.
	 */
	return queue_delayed_work(system_freezable_wq, work, delay);
}
#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed the request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	/* Flag re-tuning needed on CRC errors */
	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
	    (mrq->data && mrq->data->error == -EILSEQ) ||
	    (mrq->stop && mrq->stop->error == -EILSEQ)))
		mmc_retune_needed(host);

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		if (mrq->sbc) {
			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->sbc->opcode,
				mrq->sbc->error,
				mrq->sbc->resp[0], mrq->sbc->resp[1],
				mrq->sbc->resp[2], mrq->sbc->resp[3]);
		}

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);
	}
}

EXPORT_SYMBOL(mmc_request_done);
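/*
 * Example: a host controller driver typically calls mmc_request_done()
 * from its interrupt handler once its command/data engines go idle.
 * A minimal sketch; the "struct my_host" and my_host_irq() names are
 * hypothetical, not part of this file:
 *
 *	static irqreturn_t my_host_irq(int irq, void *dev_id)
 *	{
 *		struct my_host *h = dev_id;
 *		struct mmc_request *mrq = h->mrq;
 *
 *		if (!mrq)
 *			return IRQ_NONE;
 *
 *		h->mrq = NULL;			// request no longer in flight
 *		mmc_request_done(h->mmc, mrq);	// hand it back to the core
 *		return IRQ_HANDLED;
 *	}
 */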
static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/* Assumes host controller has been runtime resumed by mmc_claim_host */
	err = mmc_retune(host);
	if (err) {
		mrq->cmd->error = err;
		mmc_request_done(host, mrq);
		return;
	}

	/*
	 * For sdio rw commands we must wait for card busy otherwise some
	 * sdio devices won't work properly.
	 */
	if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
		int tries = 500; /* Wait approx 500ms at maximum */

		while (host->ops->card_busy(host) && --tries)
			mmc_delay(1);

		if (tries == 0) {
			mrq->cmd->error = -EBUSY;
			mmc_request_done(host, mrq);
			return;
		}
	}

	host->ops->request(host, mrq);
}

static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif
	mmc_retune_hold(host);

	if (mmc_card_removed(host->card))
		return -ENOMEDIUM;

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->sbc) {
		mrq->sbc->error = 0;
		mrq->sbc->mrq = mrq;
	}
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	led_trigger_event(host->led, LED_FULL);
	__mmc_start_request(host, mrq);

	return 0;
}
/**
 * mmc_start_bkops - start BKOPS for supported cards
 * @card: MMC card to start BKOPS
 * @from_exception: A flag to indicate if this function was
 *		    called due to an exception raised by the card
 *
 * Start background operations whenever requested.
 * When the urgent BKOPS bit is set in an R1 command response
 * then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	BUG_ON(!card);

	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_BKOPS_MAX_TIMEOUT;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	mmc_retune_hold(card->host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout,
			use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		mmc_retune_release(card->host);
		goto out;
	}

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher) BKOPS is executed
	 * synchronously; otherwise the operation is still in progress
	 * when we return.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
	else
		mmc_retune_release(card->host);
out:
	mmc_release_host(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);

/*
 * mmc_wait_data_done() - done callback for data request
 * @mrq: done data request
 *
 * Wakes up mmc context, passed as a callback to host controller driver
 */
static void mmc_wait_data_done(struct mmc_request *mrq)
{
	struct mmc_context_info *context_info = &mrq->host->context_info;

	context_info->is_done_rcv = true;
	wake_up_interruptible(&context_info->wait);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

/*
 * __mmc_start_data_req() - starts data request
 * @host: MMC host to start the request
 * @mrq: data request to start
 *
 * Sets the done callback to be called when request is completed by the card.
 * Starts data mmc request execution
 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mrq->done = mmc_wait_data_done;
	mrq->host = host;

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_wait_data_done(mrq);
	}

	return err;
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		complete(&mrq->completion);
	}

	return err;
}
/*
 * mmc_wait_for_data_req_done() - wait for a request to complete
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 *
 * Blocks the MMC context until the host controller acknowledges the end of
 * data request execution or a new request notification arrives from the
 * block layer. Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
static int mmc_wait_for_data_req_done(struct mmc_host *host,
				      struct mmc_request *mrq,
				      struct mmc_async_req *next_req)
{
	struct mmc_command *cmd;
	struct mmc_context_info *context_info = &host->context_info;
	int err;
	unsigned long flags;

	while (1) {
		wait_event_interruptible(context_info->wait,
				(context_info->is_done_rcv ||
				 context_info->is_new_req));
		spin_lock_irqsave(&context_info->lock, flags);
		context_info->is_waiting_last_req = false;
		spin_unlock_irqrestore(&context_info->lock, flags);
		if (context_info->is_done_rcv) {
			context_info->is_done_rcv = false;
			context_info->is_new_req = false;
			cmd = mrq->cmd;

			if (!cmd->error || !cmd->retries ||
			    mmc_card_removed(host->card)) {
				err = host->areq->err_check(host->card,
							    host->areq);
				break; /* return err */
			} else {
				mmc_retune_recheck(host);
				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
					mmc_hostname(host),
					cmd->opcode, cmd->error);
				cmd->retries--;
				cmd->error = 0;
				__mmc_start_request(host, mrq);
				continue; /* wait for done/new event again */
			}
		} else if (context_info->is_new_req) {
			context_info->is_new_req = false;
			if (!next_req)
				return MMC_BLK_NEW_REQUEST;
		}
	}
	mmc_retune_release(host);
	return err;
}

static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;

		/*
		 * If host has timed out waiting for the sanitize
		 * to complete, card might be still in programming state
		 * so let's try to bring the card out of programming
		 * state.
		 */
		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
			if (!mmc_interrupt_hpi(host->card)) {
				pr_warn("%s: %s: Interrupted sanitize\n",
					mmc_hostname(host), __func__);
				cmd->error = 0;
				break;
			} else {
				pr_err("%s: %s: Failed to interrupt sanitize\n",
				       mmc_hostname(host), __func__);
			}
		}
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		mmc_retune_recheck(host);

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		__mmc_start_request(host, mrq);
	}

	mmc_retune_release(host);
}

/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previously started request
 *		  that may run in parallel to this call, otherwise false
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let the
 * host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req)
		host->ops->pre_req(host, mrq, is_first_req);
}
/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: Error, if non zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req)
		host->ops->post_req(host, mrq, err);
}

/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @error: out parameter; returns 0 for success, otherwise non zero
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for completion
 * of that request before starting the new one, then return.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, NULL in case of none completed.
 * Wait for an ongoing request (previously started) to complete and
 * return the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
		if (err == MMC_BLK_NEW_REQUEST) {
			if (error)
				*error = err;
			/*
			 * The previous request was not completed,
			 * nothing to return
			 */
			return NULL;
		}
		/*
		 * Check BKOPS urgency for each R1 response
		 */
		if (host->card && mmc_card_mmc(host->card) &&
		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {

			/* Cancel the prepared request */
			if (areq)
				mmc_post_req(host, areq->mrq, -EINVAL);

			mmc_start_bkops(host->card, true);

			/* prepare the request again */
			if (areq)
				mmc_pre_req(host, areq->mrq, !host->areq);
		}
	}

	if (!err && areq)
		start_err = __mmc_start_data_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
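/*
 * Example: issuing a single-block read with mmc_wait_for_req(). A minimal
 * sketch, assuming the caller has already claimed the host and owns a
 * scatterlist entry "sg" for a DMA-able 512-byte buffer (the surrounding
 * helper this would live in is hypothetical):
 *
 *	struct mmc_request mrq = {NULL};
 *	struct mmc_command cmd = {0};
 *	struct mmc_data data = {0};
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	mmc_set_data_timeout(&data, card);
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mmc_wait_for_req(card->host, &mrq);
 *	return cmd.error ? cmd.error : data.error;
 */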
/**
 * mmc_interrupt_hpi - issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issues a High Priority Interrupt, then polls the card status
 * until the card is out of the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	BUG_ON(!card);

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
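/*
 * Example: a command-only transaction with mmc_wait_for_cmd(), here
 * fetching the card status the same way mmc_do_erase() below polls it.
 * A minimal sketch; assumes the host is already claimed:
 *
 *	struct mmc_command cmd = {0};
 *	u32 status;
 *	int err;
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	if (!err)
 *		status = cmd.resp[0];
 */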
/**
 * mmc_stop_bkops - stop ongoing BKOPS
 * @card: MMC card to check BKOPS
 *
 * Send HPI command to stop ongoing background operations to
 * allow rapid servicing of foreground operations, e.g. read/
 * writes. Wait until the card comes out of the programming state
 * to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	BUG_ON(!card);
	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL we can't issue an HPI; in that case the card
	 * is expected to complete the BKOPS on its own.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		mmc_retune_release(card->host);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);

int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	mmc_claim_host(card->host);
	err = mmc_get_ext_csd(card, &ext_csd);
	mmc_release_host(card->host);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}
EXPORT_SYMBOL(mmc_read_bkops_status);
/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (card->host->ios.clock)
			timeout_us += data->timeout_clks * 1000 /
				(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says: "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}

		/* assign limit value if invalid */
		if (timeout_us == 0)
			data->timeout_ns = limit_us * 1000;
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 300ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 300000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);

/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value, then this will return prematurely
 * with that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;
	bool pm = false;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		if (host->claim_cnt == 1)
			pm = true;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);

	if (pm)
		pm_runtime_get_sync(mmc_dev(host));

	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release a MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
		pm_runtime_mark_last_busy(mmc_dev(host));
		pm_runtime_put_autosuspend(mmc_dev(host));
	}
}
EXPORT_SYMBOL(mmc_release_host);
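/*
 * Example: the claim/release pairing as seen from a caller. A minimal
 * sketch, assuming "card" is a valid struct mmc_card (mmc_claim_host()
 * is the no-abort wrapper around __mmc_claim_host()):
 *
 *	mmc_claim_host(card->host);	// serialise against other users
 *	err = mmc_send_status(card, &status);
 *	mmc_release_host(card->host);	// wake up any waiters
 *
 * Claims nest: a context that already holds the host may claim it again,
 * and only the final mmc_release_host() drops the runtime PM reference.
 */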
/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card)
{
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
}
EXPORT_SYMBOL(mmc_get_card);

/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card)
{
	mmc_release_host(card->host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
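/*
 * Example: a card driver brackets direct card access with these helpers.
 * A minimal sketch; my_card_op() and mmc_do_something() are hypothetical
 * names, not part of this file:
 *
 *	static int my_card_op(struct mmc_card *card)
 *	{
 *		int err;
 *
 *		mmc_get_card(card);	// resume the card, claim the host
 *		err = mmc_do_something(card);
 *		mmc_put_card(card);	// release host, allow autosuspend
 *		return err;
 *	}
 */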
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz && hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

int mmc_execute_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 opcode;
	int err;

	if (!host->ops->execute_tuning)
		return 0;

	if (mmc_card_mmc(card))
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	else
		opcode = MMC_SEND_TUNING_BLOCK;

	err = host->ops->execute_tuning(host, opcode);

	if (err)
		pr_err("%s: tuning execution failed\n", mmc_hostname(host));
	else
		mmc_retune_enable(host);

	return err;
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}

/*
 * Set initial state after a power cycle or a hw_reset.
 */
void mmc_set_initial_state(struct mmc_host *host)
{
	mmc_retune_disable(host);

	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	host->ios.drv_type = 0;

	mmc_set_ios(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
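/*
 * Worked example of the boundary rules above: for a 3.2-3.4 V range,
 * vdd_min = 3200 is a boundary and prefers the low bit (MMC_VDD_31_32,
 * covering 3100-3200 mV), while vdd_max = 3400 prefers the high bit
 * (MMC_VDD_34_35, covering 3400-3500 mV), so:
 *
 *	mmc_vddrange_to_ocrmask(3200, 3400) ==
 *		MMC_VDD_31_32 | MMC_VDD_32_33 |
 *		MMC_VDD_33_34 | MMC_VDD_34_35
 */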
#ifdef CONFIG_OF

/**
 * mmc_of_parse_voltage - return mask of supported voltages
 * @np: device node to be parsed
 * @mask: mask of voltages available for MMC/SD/SDIO
 *
 * Parses the "voltage-ranges" property and ORs the result into @mask.
 * Returns zero on success, or a negative errno if the property is
 * missing or a voltage-range is invalid.
 */
int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
{
	const u32 *voltage_ranges;
	int num_ranges, i;

	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
	if (!voltage_ranges || !num_ranges) {
		pr_info("%s: voltage-ranges unspecified\n", np->full_name);
		return -EINVAL;
	}

	for (i = 0; i < num_ranges; i++) {
		const int j = i * 2;
		u32 ocr_mask;

		ocr_mask = mmc_vddrange_to_ocrmask(
				be32_to_cpu(voltage_ranges[j]),
				be32_to_cpu(voltage_ranges[j + 1]));
		if (!ocr_mask) {
			pr_err("%s: voltage-range #%d is invalid\n",
			       np->full_name, i);
			return -EINVAL;
		}
		*mask |= ocr_mask;
	}

	return 0;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);

#endif /* CONFIG_OF */
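/*
 * Example: the device-tree property consumed above encodes min/max pairs
 * in millivolts. A sketch of a slot node supporting 3.2-3.4 V and a fixed
 * 1.8 V rail (the node name is illustrative; binding details belong to
 * the individual host bindings):
 *
 *	mmc-slot {
 *		voltage-ranges = <3200 3400>, <1800 1800>;
 *	};
 */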
static int mmc_of_get_func_num(struct device_node *node)
{
	u32 reg;
	int ret;

	ret = of_property_read_u32(node, "reg", &reg);
	if (ret < 0)
		return ret;

	return reg;
}

struct device_node *mmc_of_find_child_device(struct mmc_host *host,
		unsigned func_num)
{
	struct device_node *node;

	if (!host->parent || !host->parent->of_node)
		return NULL;

	for_each_child_of_node(host->parent->of_node, node) {
		if (mmc_of_get_func_num(node) == func_num)
			return node;
	}

	return NULL;
}

#ifdef CONFIG_REGULATOR

/**
 * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage
 * @vdd_bit: OCR bit number
 * @min_uV: minimum voltage value (uV)
 * @max_uV: maximum voltage value (uV)
 *
 * This function returns the voltage range according to the provided OCR
 * bit number. If conversion is not possible a negative errno value is
 * returned.
 */
static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
{
	int tmp;

	if (!vdd_bit)
		return -EINVAL;

	/*
	 * REVISIT mmc_vddrange_to_ocrmask() may have set some
	 * bits this regulator doesn't quite support ... don't
	 * be too picky, most cards and regulators are OK with
	 * a 0.1V range goof (it's a small error percentage).
	 */
	tmp = vdd_bit - ilog2(MMC_VDD_165_195);
	if (tmp == 0) {
		*min_uV = 1650 * 1000;
		*max_uV = 1950 * 1000;
	} else {
		*min_uV = 1900 * 1000 + tmp * 100 * 1000;
		*max_uV = *min_uV + 100 * 1000;
	}

	return 0;
}

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;
	int vdd_uV;
	int vdd_mV;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	if (!result) {
		vdd_uV = regulator_get_voltage(supply);
		if (vdd_uV <= 0)
			return vdd_uV;

		vdd_mV = vdd_uV / 1000;
		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);

		result = regulator_set_voltage(supply, min_uV, max_uV);
		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);

static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
						  int min_uV, int target_uV,
						  int max_uV)
{
	/*
	 * Check if supported first to avoid errors since we may try several
	 * signal levels during power up and don't want to show errors.
	 */
	if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
		return -EINVAL;

	return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
					     max_uV);
}
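/*
 * Example: a host driver wiring mmc_regulator_set_ocr() into its
 * set_ios() callback. A minimal sketch; my_set_ios() is a hypothetical
 * name and error handling is elided:
 *
 *	static void my_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		if (!IS_ERR(mmc->supply.vmmc))
 *			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
 *					      ios->power_mode == MMC_POWER_OFF ?
 *					      0 : ios->vdd);
 *		// ... program clock, bus width, timing ...
 *	}
 */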
/**
 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
 * @mmc: the host to regulate
 * @ios: io settings containing the requested signal voltage
 *
 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
 * That will match the behavior of old boards where VQMMC and VMMC were
 * supplied by the same supply. The Bus Operating conditions for 3.3V
 * signaling in the SD card spec also define VQMMC in terms of VMMC.
 * If this is not possible we'll try the full 2.7-3.6V of the spec.
 *
 * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
 * requested voltage. This is definitely a good idea for UHS where there's a
 * separate regulator on the card that's trying to make 1.8V and it's best if
 * we match.
 *
 * This function is expected to be used by a controller's
 * start_signal_voltage_switch() function.
 */
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct device *dev = mmc_dev(mmc);
	int ret, volt, min_uV, max_uV;

	/* If no vqmmc supply then we can't change the voltage */
	if (IS_ERR(mmc->supply.vqmmc))
		return -EINVAL;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_120:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1100000, 1200000, 1300000);
	case MMC_SIGNAL_VOLTAGE_180:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1700000, 1800000, 1950000);
	case MMC_SIGNAL_VOLTAGE_330:
		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
		if (ret < 0)
			return ret;

		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
			__func__, volt, max_uV);

		min_uV = max(volt - 300000, 2700000);
		max_uV = min(max_uV + 200000, 3600000);

		/*
		 * Due to a limitation in the current implementation of
		 * regulator_set_voltage_triplet() which is taking the lowest
		 * voltage possible if below the target, search for a suitable
		 * voltage in two steps and try to stay close to vmmc
		 * with a 0.3V tolerance at first.
		 */
		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						min_uV, volt, max_uV))
			return 0;

		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						2700000, volt, 3600000);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);

#endif /* CONFIG_REGULATOR */

int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	int ret;

	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");

	if (IS_ERR(mmc->supply.vmmc)) {
		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "No vmmc regulator found\n");
	} else {
		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
		if (ret > 0)
			mmc->ocr_avail = ret;
		else
			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
	}

	if (IS_ERR(mmc->supply.vqmmc)) {
		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "No vqmmc regulator found\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
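/*
 * Example: typical probe-time usage of mmc_regulator_get_supply(). A
 * minimal sketch from a hypothetical host driver's probe(); the fallback
 * OCR mask is an arbitrary board-specific choice:
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret == -EPROBE_DEFER)
 *		return ret;		// regulators not ready yet
 *	if (!mmc->ocr_avail)
 *		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 */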
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		dev_warn(mmc_dev(host),
			 "card claims to support voltages below defined range\n");
		ocr &= ~0x7F;
	}

	ocr &= host->ocr_avail;
	if (!ocr) {
		dev_warn(mmc_dev(host), "no support for card's volts\n");
		return 0;
	}

	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
		bit = ffs(ocr) - 1;
		ocr &= 3 << bit;
		mmc_power_cycle(host, ocr);
	} else {
		bit = fls(ocr) - 1;
		ocr &= 3 << bit;
		if (bit != host->ios.vdd)
			dev_warn(mmc_dev(host), "exceeding card's volts\n");
	}

	return ocr;
}

int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;

	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch)
		err = host->ops->start_signal_voltage_switch(host, &host->ios);

	if (err)
		host->ios.signal_voltage = old_signal_voltage;

	return err;
}

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
{
	struct mmc_command cmd = {0};
	int err = 0;
	u32 clock;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return __mmc_set_signal_voltage(host, signal_voltage);

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warn("%s: cannot verify signal voltage switch\n",
			mmc_hostname(host));

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}
	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (__mmc_set_signal_voltage(host, signal_voltage)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mmc_delay(10);
	host->ios.clock = clock;
	mmc_set_ios(host);

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			"power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host, ocr);
	}

	return err;
}
/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
}

int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
			      int card_drv_type, int *drv_type)
{
	struct mmc_host *host = card->host;
	int host_drv_type = SD_DRIVER_TYPE_B;

	*drv_type = 0;

	if (!host->ops->select_drive_strength)
		return 0;

	/* Use SD definition of driver strength for hosts */
	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
		host_drv_type |= SD_DRIVER_TYPE_A;

	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
		host_drv_type |= SD_DRIVER_TYPE_C;

	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
		host_drv_type |= SD_DRIVER_TYPE_D;

	/*
	 * The drive strength that the hardware can support
	 * depends on the board design. Pass the appropriate
	 * information and let the hardware specific code
	 * return what is possible given the options
	 */
	return host->ops->select_drive_strength(card, max_dtr,
						host_drv_type,
						card_drv_type,
						drv_type);
}
/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_pwrseq_pre_power_on(host);

	host->ios.vdd = fls(ocr) - 1;
	host->ios.power_mode = MMC_POWER_UP;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/* Try to set signal voltage to 3.3V but fall back to 1.8V or 1.2V */
	if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3V\n");
	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8V\n");
	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2V\n");

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	mmc_pwrseq_post_power_on(host);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
}

void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_pwrseq_power_off(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	host->ios.power_mode = MMC_POWER_OFF;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);
}

void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host, ocr);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}
static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
				bool cd_irq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	/*
	 * If the device is configured as wakeup, we prevent a new sleep for
	 * 5 s to give user space a chance to consume the event.
	 */
	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
		device_can_wakeup(mmc_dev(host)))
		pm_wakeup_event(mmc_dev(host), 5000);

	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
	_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card. That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time. For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value. For modern cards it
	 * will end up being 4MiB. Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else if (card->erase_size) {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	} else
		card->pref_erase = 0;
}
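/*
 * Example: a card-detect GPIO interrupt handler kicking off a rescan via
 * mmc_detect_change(). A minimal sketch; my_cd_irq() is a hypothetical
 * name and the 200 ms debounce delay is an arbitrary choice:
 *
 *	static irqreturn_t my_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */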
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target. The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (card->host->ios.clock / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	unsigned long timeout;
	int err;

	mmc_retune_hold(card->host);

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2. For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case. SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands. Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}

		/*
		 * Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
			       mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}

	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
	mmc_retune_release(card->host);
	return err;
}

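/*
 * For reference, an illustrative trace of the sequence above on a
 * block-addressed eMMC, erasing sectors 0..2047 (the opcodes are the
 * standard ones; the trace itself is hypothetical):
 *
 *	CMD35 (MMC_ERASE_GROUP_START), arg = 0
 *	CMD36 (MMC_ERASE_GROUP_END),   arg = 2047
 *	CMD38 (MMC_ERASE),             arg = MMC_ERASE_ARG
 *	CMD13 (MMC_SEND_STATUS), polled until the card reports
 *	       READY_FOR_DATA and has left the programming state
 */
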
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;
	int err;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	/*
	 * Special case where only one erase-group fits in the timeout budget:
	 * if the region crosses an erase-group boundary in this particular
	 * case, we will be trimming more than one erase-group, which does not
	 * fit in the timeout budget of the controller, so we need to split it
	 * and call mmc_do_erase() twice if necessary. This special case is
	 * identified by the card->eg_boundary flag.
	 */
	rem = card->erase_size - (from % card->erase_size);
	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
		err = mmc_do_erase(card, from, from + rem - 1, arg);
		from += rem;
		if ((err) || (to <= from))
			return err;
	}

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

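/*
 * A worked example of the MMC_ERASE_ARG alignment above (the numbers
 * are invented): with erase_size = 1024 sectors, a caller passing
 * from = 1000, nr = 5000 first has 'from' rounded up by
 * rem = 1024 - (1000 % 1024) = 24 to 1024, leaving nr = 4976; the
 * trailing partial group (4976 % 1024 = 880 sectors) is then dropped,
 * so sectors 1024..5119 end up being erased. A typical caller, with
 * the host already claimed, might look like:
 *
 *	err = mmc_erase(card, 1000, 5000, MMC_ERASE_ARG);
 */
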
int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5,
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_busy_timeout)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	/*
	 * When specifying a sector range to trim, chances are we might cross
	 * an erase-group boundary even if the amount of sectors is less than
	 * one erase-group.
	 * If we can only fit one erase-group in the controller timeout budget,
	 * we have to care that erase-group boundaries are not crossed by a
	 * single trim operation. We flag that special case with "eg_boundary".
	 * In all other cases we can just decrement qty and pretend that we
	 * always touch (qty + 1) erase-groups as a simple optimization.
	 */
	if (qty == 1)
		card->eg_boundary = 1;
	else
		qty--;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty + 1;
	else
		max_discard = qty * card->erase_size;

	return max_discard;
}

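/*
 * A worked example of the eg_boundary case (values invented): suppose
 * erase_size = 1024 sectors and the host's max_busy_timeout only fits
 * one erase group, so qty == 1 and eg_boundary is set. A trim of 50
 * sectors starting at sector 1000 crosses the group boundary at
 * sector 1024 and therefore touches two groups. mmc_erase() detects
 * this (nr = 50 > rem = 1024 - 1000 % 1024 = 24) and splits the
 * request into mmc_do_erase(card, 1000, 1023, arg) followed by
 * mmc_do_erase(card, 1024, 1049, arg), keeping each step within the
 * controller's timeout budget.
 */
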
unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_busy_timeout)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency, which can change. In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_busy_timeout);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
		       bool is_rel_write)
{
	struct mmc_command cmd = {0};

	cmd.opcode = MMC_SET_BLOCK_COUNT;
	cmd.arg = blockcount & 0x0000FFFF;
	if (is_rel_write)
		cmd.arg |= 1 << 31;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);

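/*
 * Illustrative aside: the CMD23 argument above packs the block count
 * into the low 16 bits, with bit 31 requesting a reliable write. A
 * hypothetical call such as
 *
 *	mmc_set_blockcount(card, 8, true);
 *
 * would therefore issue MMC_SET_BLOCK_COUNT with arg = 0x80000008.
 */
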
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	host->ops->hw_reset(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	int ret;

	if (!host->card)
		return -EINVAL;

	mmc_bus_get(host);
	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
		mmc_bus_put(host);
		return -EOPNOTSUPP;
	}

	ret = host->bus_ops->reset(host);
	mmc_bus_put(host);

	if (ret != -EOPNOTSUPP)
		pr_warn("%s: tried to reset card\n", mmc_hostname(host));

	return ret;
}
EXPORT_SYMBOL(mmc_hw_reset);

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host, host->ocr_avail);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset sends CMD52 to reset the card. Since we do not know
	 * if the card is being re-initialized, just send it. CMD52
	 * should be ignored by SD/eMMC cards.
	 * Skip it if we already know that we do not support SDIO commands.
	 */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		sdio_reset(host);

	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		if (!mmc_attach_sdio(host))
			return 0;

	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}

int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);

	/*
	 * Card detect status and alive check may be out of sync if card is
	 * removed slowly, when card detect switch changes while card/slot
	 * pads are still contacted in hardware (refer to "SD Card Mechanical
	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
	 * detect work 200ms later for this case.
	 */
	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
		mmc_detect_change(host, msecs_to_jiffies(200));
		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
	}

	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	if (host->caps & MMC_CAP_NONREMOVABLE)
		return 0;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			_mmc_detect_change(host, 0, false);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);

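/*
 * For context, a minimal sketch of how the detection machinery above
 * is typically driven from a host controller's card-detect interrupt.
 * This is hypothetical (the handler name and the 200ms debounce are
 * invented; the real pattern lives in individual host drivers):
 *
 *	static irqreturn_t my_host_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */
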
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	/* If there is a non-removable card registered, only scan once */
	if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	if (host->trigger_card_event && host->ops->card_event) {
		mmc_claim_host(host);
		host->ops->card_event(host);
		mmc_release_host(host);
		host->trigger_card_event = false;
	}

	mmc_bus_get(host);

	/*
	 * If there is a _removable_ card registered, check whether it is
	 * still present.
	 */
	if (host->bus_ops && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* If there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	mmc_claim_host(host);
	if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
	    host->ops->get_cd(host) == 0) {
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	host->ios.power_mode = MMC_POWER_UNDEFINED;

	mmc_claim_host(host);
	if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
		mmc_power_off(host);
	else
		mmc_power_up(host, host->ocr_avail);
	mmc_release_host(host);

	mmc_gpiod_request_cd_irq(host);
	_mmc_detect_change(host, 0, false);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	if (host->slot.cd_irq >= 0)
		disable_irq(host->slot.cd_irq);

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);

	/* Clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_claim_host(host);
	mmc_power_off(host);
	mmc_release_host(host);
}

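/*
 * For context, a minimal sketch of the host lifecycle as seen from a
 * hypothetical host controller driver; the driver structure and names
 * are invented, only the mmc core calls are real. mmc_add_host() ends
 * up calling mmc_start_host() above, and mmc_remove_host() calls
 * mmc_stop_host():
 *
 *	static int my_host_probe(struct platform_device *pdev)
 *	{
 *		struct mmc_host *mmc;
 *
 *		mmc = mmc_alloc_host(sizeof(struct my_host), &pdev->dev);
 *		if (!mmc)
 *			return -ENOMEM;
 *		(set mmc->ops, mmc->caps, clocks, IRQs, ...)
 *		return mmc_add_host(mmc);
 *	}
 *
 *	static int my_host_remove(struct platform_device *pdev)
 *	{
 *		mmc_remove_host(mmc);
 *		mmc_free_host(mmc);
 *		return 0;
 *	}
 */
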
int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host, host->card->ocr);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

#ifdef CONFIG_PM_SLEEP
/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in the PM notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
static int mmc_pm_notify(struct notifier_block *notify_block,
			 unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops)
			break;

		/* Validate prerequisites for suspend */
		if (host->bus_ops->pre_suspend)
			err = host->bus_ops->pre_suspend(host);
		if (!err)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		_mmc_detect_change(host, 0, false);

	}

	return 0;
}

void mmc_register_pm_notifier(struct mmc_host *host)
{
	host->pm_notify.notifier_call = mmc_pm_notify;
	register_pm_notifier(&host->pm_notify);
}

void mmc_unregister_pm_notifier(struct mmc_host *host)
{
	unregister_pm_notifier(&host->pm_notify);
}
#endif

/**
 * mmc_init_context_info() - init synchronization context
 * @host: mmc host
 *
 * Init struct context_info needed to implement asynchronous
 * request mechanism, used by mmc core, host driver and mmc requests
 * supplier.
 */
void mmc_init_context_info(struct mmc_host *host)
{
	spin_lock_init(&host->context_info.lock);
	host->context_info.is_new_req = false;
	host->context_info.is_done_rcv = false;
	host->context_info.is_waiting_last_req = false;
	init_waitqueue_head(&host->context_info.wait);
}

static int __init mmc_init(void)
{
	int ret;

	ret = mmc_register_bus();
	if (ret)
		return ret;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");