1 /* 2 * linux/drivers/mmc/core/core.c 3 * 4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved. 5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. 6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 */ 13 #include <linux/module.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/completion.h> 17 #include <linux/device.h> 18 #include <linux/delay.h> 19 #include <linux/pagemap.h> 20 #include <linux/err.h> 21 #include <linux/leds.h> 22 #include <linux/scatterlist.h> 23 #include <linux/log2.h> 24 #include <linux/regulator/consumer.h> 25 #include <linux/pm_runtime.h> 26 #include <linux/pm_wakeup.h> 27 #include <linux/suspend.h> 28 #include <linux/fault-inject.h> 29 #include <linux/random.h> 30 #include <linux/slab.h> 31 #include <linux/of.h> 32 33 #include <linux/mmc/card.h> 34 #include <linux/mmc/host.h> 35 #include <linux/mmc/mmc.h> 36 #include <linux/mmc/sd.h> 37 38 #include "core.h" 39 #include "bus.h" 40 #include "host.h" 41 #include "sdio_bus.h" 42 43 #include "mmc_ops.h" 44 #include "sd_ops.h" 45 #include "sdio_ops.h" 46 47 /* If the device is not responding */ 48 #define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ 49 50 /* 51 * Background operations can take a long time, depending on the housekeeping 52 * operations the card has to perform. 53 */ 54 #define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */ 55 56 static struct workqueue_struct *workqueue; 57 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; 58 59 /* 60 * Enabling software CRCs on the data blocks can be a significant (30%) 61 * performance cost, and for other reasons may not always be desired. 62 * So we allow it it to be disabled. 63 */ 64 bool use_spi_crc = 1; 65 module_param(use_spi_crc, bool, 0); 66 67 /* 68 * We normally treat cards as removed during suspend if they are not 69 * known to be on a non-removable bus, to avoid the risk of writing 70 * back data to a different card after resume. Allow this to be 71 * overridden if necessary. 72 */ 73 #ifdef CONFIG_MMC_UNSAFE_RESUME 74 bool mmc_assume_removable; 75 #else 76 bool mmc_assume_removable = 1; 77 #endif 78 EXPORT_SYMBOL(mmc_assume_removable); 79 module_param_named(removable, mmc_assume_removable, bool, 0644); 80 MODULE_PARM_DESC( 81 removable, 82 "MMC/SD cards are removable and may be removed during suspend"); 83 84 /* 85 * Internal function. Schedule delayed work in the MMC work queue. 86 */ 87 static int mmc_schedule_delayed_work(struct delayed_work *work, 88 unsigned long delay) 89 { 90 return queue_delayed_work(workqueue, work, delay); 91 } 92 93 /* 94 * Internal function. Flush all scheduled work from the MMC work queue. 95 */ 96 static void mmc_flush_scheduled_work(void) 97 { 98 flush_workqueue(workqueue); 99 } 100 101 #ifdef CONFIG_FAIL_MMC_REQUEST 102 103 /* 104 * Internal function. Inject random data errors. 105 * If mmc_data is NULL no errors are injected. 106 */ 107 static void mmc_should_fail_request(struct mmc_host *host, 108 struct mmc_request *mrq) 109 { 110 struct mmc_command *cmd = mrq->cmd; 111 struct mmc_data *data = mrq->data; 112 static const int data_errors[] = { 113 -ETIMEDOUT, 114 -EILSEQ, 115 -EIO, 116 }; 117 118 if (!data) 119 return; 120 121 if (cmd->error || data->error || 122 !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) 123 return; 124 125 data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)]; 126 data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9; 127 } 128 129 #else /* CONFIG_FAIL_MMC_REQUEST */ 130 131 static inline void mmc_should_fail_request(struct mmc_host *host, 132 struct mmc_request *mrq) 133 { 134 } 135 136 #endif /* CONFIG_FAIL_MMC_REQUEST */ 137 138 /** 139 * mmc_request_done - finish processing an MMC request 140 * @host: MMC host which completed request 141 * @mrq: MMC request which request 142 * 143 * MMC drivers should call this function when they have completed 144 * their processing of a request. 145 */ 146 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) 147 { 148 struct mmc_command *cmd = mrq->cmd; 149 int err = cmd->error; 150 151 if (err && cmd->retries && mmc_host_is_spi(host)) { 152 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND) 153 cmd->retries = 0; 154 } 155 156 if (err && cmd->retries && !mmc_card_removed(host->card)) { 157 /* 158 * Request starter must handle retries - see 159 * mmc_wait_for_req_done(). 160 */ 161 if (mrq->done) 162 mrq->done(mrq); 163 } else { 164 mmc_should_fail_request(host, mrq); 165 166 led_trigger_event(host->led, LED_OFF); 167 168 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n", 169 mmc_hostname(host), cmd->opcode, err, 170 cmd->resp[0], cmd->resp[1], 171 cmd->resp[2], cmd->resp[3]); 172 173 if (mrq->data) { 174 pr_debug("%s: %d bytes transferred: %d\n", 175 mmc_hostname(host), 176 mrq->data->bytes_xfered, mrq->data->error); 177 } 178 179 if (mrq->stop) { 180 pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n", 181 mmc_hostname(host), mrq->stop->opcode, 182 mrq->stop->error, 183 mrq->stop->resp[0], mrq->stop->resp[1], 184 mrq->stop->resp[2], mrq->stop->resp[3]); 185 } 186 187 if (mrq->done) 188 mrq->done(mrq); 189 190 mmc_host_clk_release(host); 191 } 192 } 193 194 EXPORT_SYMBOL(mmc_request_done); 195 196 static void 197 mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) 198 { 199 #ifdef CONFIG_MMC_DEBUG 200 unsigned int i, sz; 201 struct scatterlist *sg; 202 #endif 203 204 if (mrq->sbc) { 205 pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n", 206 mmc_hostname(host), mrq->sbc->opcode, 207 mrq->sbc->arg, mrq->sbc->flags); 208 } 209 210 pr_debug("%s: starting CMD%u arg %08x flags %08x\n", 211 mmc_hostname(host), mrq->cmd->opcode, 212 mrq->cmd->arg, mrq->cmd->flags); 213 214 if (mrq->data) { 215 pr_debug("%s: blksz %d blocks %d flags %08x " 216 "tsac %d ms nsac %d\n", 217 mmc_hostname(host), mrq->data->blksz, 218 mrq->data->blocks, mrq->data->flags, 219 mrq->data->timeout_ns / 1000000, 220 mrq->data->timeout_clks); 221 } 222 223 if (mrq->stop) { 224 pr_debug("%s: CMD%u arg %08x flags %08x\n", 225 mmc_hostname(host), mrq->stop->opcode, 226 mrq->stop->arg, mrq->stop->flags); 227 } 228 229 WARN_ON(!host->claimed); 230 231 mrq->cmd->error = 0; 232 mrq->cmd->mrq = mrq; 233 if (mrq->data) { 234 BUG_ON(mrq->data->blksz > host->max_blk_size); 235 BUG_ON(mrq->data->blocks > host->max_blk_count); 236 BUG_ON(mrq->data->blocks * mrq->data->blksz > 237 host->max_req_size); 238 239 #ifdef CONFIG_MMC_DEBUG 240 sz = 0; 241 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) 242 sz += sg->length; 243 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz); 244 #endif 245 246 mrq->cmd->data = mrq->data; 247 mrq->data->error = 0; 248 mrq->data->mrq = mrq; 249 if (mrq->stop) { 250 mrq->data->stop = mrq->stop; 251 mrq->stop->error = 0; 252 mrq->stop->mrq = mrq; 253 } 254 } 255 mmc_host_clk_hold(host); 256 led_trigger_event(host->led, LED_FULL); 257 host->ops->request(host, mrq); 258 } 259 260 /** 261 * mmc_start_bkops - start BKOPS for supported cards 262 * @card: MMC card to start BKOPS 263 * @form_exception: A flag to indicate if this function was 264 * called due to an exception raised by the card 265 * 266 * Start background operations whenever requested. 267 * When the urgent BKOPS bit is set in a R1 command response 268 * then background operations should be started immediately. 269 */ 270 void mmc_start_bkops(struct mmc_card *card, bool from_exception) 271 { 272 int err; 273 int timeout; 274 bool use_busy_signal; 275 276 BUG_ON(!card); 277 278 if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card)) 279 return; 280 281 err = mmc_read_bkops_status(card); 282 if (err) { 283 pr_err("%s: Failed to read bkops status: %d\n", 284 mmc_hostname(card->host), err); 285 return; 286 } 287 288 if (!card->ext_csd.raw_bkops_status) 289 return; 290 291 if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 && 292 from_exception) 293 return; 294 295 mmc_claim_host(card->host); 296 if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) { 297 timeout = MMC_BKOPS_MAX_TIMEOUT; 298 use_busy_signal = true; 299 } else { 300 timeout = 0; 301 use_busy_signal = false; 302 } 303 304 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 305 EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal, true); 306 if (err) { 307 pr_warn("%s: Error %d starting bkops\n", 308 mmc_hostname(card->host), err); 309 goto out; 310 } 311 312 /* 313 * For urgent bkops status (LEVEL_2 and more) 314 * bkops executed synchronously, otherwise 315 * the operation is in progress 316 */ 317 if (!use_busy_signal) 318 mmc_card_set_doing_bkops(card); 319 out: 320 mmc_release_host(card->host); 321 } 322 EXPORT_SYMBOL(mmc_start_bkops); 323 324 /* 325 * mmc_wait_data_done() - done callback for data request 326 * @mrq: done data request 327 * 328 * Wakes up mmc context, passed as a callback to host controller driver 329 */ 330 static void mmc_wait_data_done(struct mmc_request *mrq) 331 { 332 mrq->host->context_info.is_done_rcv = true; 333 wake_up_interruptible(&mrq->host->context_info.wait); 334 } 335 336 static void mmc_wait_done(struct mmc_request *mrq) 337 { 338 complete(&mrq->completion); 339 } 340 341 /* 342 *__mmc_start_data_req() - starts data request 343 * @host: MMC host to start the request 344 * @mrq: data request to start 345 * 346 * Sets the done callback to be called when request is completed by the card. 347 * Starts data mmc request execution 348 */ 349 static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq) 350 { 351 mrq->done = mmc_wait_data_done; 352 mrq->host = host; 353 if (mmc_card_removed(host->card)) { 354 mrq->cmd->error = -ENOMEDIUM; 355 mmc_wait_data_done(mrq); 356 return -ENOMEDIUM; 357 } 358 mmc_start_request(host, mrq); 359 360 return 0; 361 } 362 363 static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) 364 { 365 init_completion(&mrq->completion); 366 mrq->done = mmc_wait_done; 367 if (mmc_card_removed(host->card)) { 368 mrq->cmd->error = -ENOMEDIUM; 369 complete(&mrq->completion); 370 return -ENOMEDIUM; 371 } 372 mmc_start_request(host, mrq); 373 return 0; 374 } 375 376 /* 377 * mmc_wait_for_data_req_done() - wait for request completed 378 * @host: MMC host to prepare the command. 379 * @mrq: MMC request to wait for 380 * 381 * Blocks MMC context till host controller will ack end of data request 382 * execution or new request notification arrives from the block layer. 383 * Handles command retries. 384 * 385 * Returns enum mmc_blk_status after checking errors. 386 */ 387 static int mmc_wait_for_data_req_done(struct mmc_host *host, 388 struct mmc_request *mrq, 389 struct mmc_async_req *next_req) 390 { 391 struct mmc_command *cmd; 392 struct mmc_context_info *context_info = &host->context_info; 393 int err; 394 unsigned long flags; 395 396 while (1) { 397 wait_event_interruptible(context_info->wait, 398 (context_info->is_done_rcv || 399 context_info->is_new_req)); 400 spin_lock_irqsave(&context_info->lock, flags); 401 context_info->is_waiting_last_req = false; 402 spin_unlock_irqrestore(&context_info->lock, flags); 403 if (context_info->is_done_rcv) { 404 context_info->is_done_rcv = false; 405 context_info->is_new_req = false; 406 cmd = mrq->cmd; 407 408 if (!cmd->error || !cmd->retries || 409 mmc_card_removed(host->card)) { 410 err = host->areq->err_check(host->card, 411 host->areq); 412 break; /* return err */ 413 } else { 414 pr_info("%s: req failed (CMD%u): %d, retrying...\n", 415 mmc_hostname(host), 416 cmd->opcode, cmd->error); 417 cmd->retries--; 418 cmd->error = 0; 419 host->ops->request(host, mrq); 420 continue; /* wait for done/new event again */ 421 } 422 } else if (context_info->is_new_req) { 423 context_info->is_new_req = false; 424 if (!next_req) { 425 err = MMC_BLK_NEW_REQUEST; 426 break; /* return err */ 427 } 428 } 429 } 430 return err; 431 } 432 433 static void mmc_wait_for_req_done(struct mmc_host *host, 434 struct mmc_request *mrq) 435 { 436 struct mmc_command *cmd; 437 438 while (1) { 439 wait_for_completion(&mrq->completion); 440 441 cmd = mrq->cmd; 442 443 /* 444 * If host has timed out waiting for the sanitize 445 * to complete, card might be still in programming state 446 * so let's try to bring the card out of programming 447 * state. 448 */ 449 if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) { 450 if (!mmc_interrupt_hpi(host->card)) { 451 pr_warning("%s: %s: Interrupted sanitize\n", 452 mmc_hostname(host), __func__); 453 cmd->error = 0; 454 break; 455 } else { 456 pr_err("%s: %s: Failed to interrupt sanitize\n", 457 mmc_hostname(host), __func__); 458 } 459 } 460 if (!cmd->error || !cmd->retries || 461 mmc_card_removed(host->card)) 462 break; 463 464 pr_debug("%s: req failed (CMD%u): %d, retrying...\n", 465 mmc_hostname(host), cmd->opcode, cmd->error); 466 cmd->retries--; 467 cmd->error = 0; 468 host->ops->request(host, mrq); 469 } 470 } 471 472 /** 473 * mmc_pre_req - Prepare for a new request 474 * @host: MMC host to prepare command 475 * @mrq: MMC request to prepare for 476 * @is_first_req: true if there is no previous started request 477 * that may run in parellel to this call, otherwise false 478 * 479 * mmc_pre_req() is called in prior to mmc_start_req() to let 480 * host prepare for the new request. Preparation of a request may be 481 * performed while another request is running on the host. 482 */ 483 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq, 484 bool is_first_req) 485 { 486 if (host->ops->pre_req) { 487 mmc_host_clk_hold(host); 488 host->ops->pre_req(host, mrq, is_first_req); 489 mmc_host_clk_release(host); 490 } 491 } 492 493 /** 494 * mmc_post_req - Post process a completed request 495 * @host: MMC host to post process command 496 * @mrq: MMC request to post process for 497 * @err: Error, if non zero, clean up any resources made in pre_req 498 * 499 * Let the host post process a completed request. Post processing of 500 * a request may be performed while another reuqest is running. 501 */ 502 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, 503 int err) 504 { 505 if (host->ops->post_req) { 506 mmc_host_clk_hold(host); 507 host->ops->post_req(host, mrq, err); 508 mmc_host_clk_release(host); 509 } 510 } 511 512 /** 513 * mmc_start_req - start a non-blocking request 514 * @host: MMC host to start command 515 * @areq: async request to start 516 * @error: out parameter returns 0 for success, otherwise non zero 517 * 518 * Start a new MMC custom command request for a host. 519 * If there is on ongoing async request wait for completion 520 * of that request and start the new one and return. 521 * Does not wait for the new request to complete. 522 * 523 * Returns the completed request, NULL in case of none completed. 524 * Wait for the an ongoing request (previoulsy started) to complete and 525 * return the completed request. If there is no ongoing request, NULL 526 * is returned without waiting. NULL is not an error condition. 527 */ 528 struct mmc_async_req *mmc_start_req(struct mmc_host *host, 529 struct mmc_async_req *areq, int *error) 530 { 531 int err = 0; 532 int start_err = 0; 533 struct mmc_async_req *data = host->areq; 534 535 /* Prepare a new request */ 536 if (areq) 537 mmc_pre_req(host, areq->mrq, !host->areq); 538 539 if (host->areq) { 540 err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq); 541 if (err == MMC_BLK_NEW_REQUEST) { 542 if (error) 543 *error = err; 544 /* 545 * The previous request was not completed, 546 * nothing to return 547 */ 548 return NULL; 549 } 550 /* 551 * Check BKOPS urgency for each R1 response 552 */ 553 if (host->card && mmc_card_mmc(host->card) && 554 ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) || 555 (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) && 556 (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) 557 mmc_start_bkops(host->card, true); 558 } 559 560 if (!err && areq) 561 start_err = __mmc_start_data_req(host, areq->mrq); 562 563 if (host->areq) 564 mmc_post_req(host, host->areq->mrq, 0); 565 566 /* Cancel a prepared request if it was not started. */ 567 if ((err || start_err) && areq) 568 mmc_post_req(host, areq->mrq, -EINVAL); 569 570 if (err) 571 host->areq = NULL; 572 else 573 host->areq = areq; 574 575 if (error) 576 *error = err; 577 return data; 578 } 579 EXPORT_SYMBOL(mmc_start_req); 580 581 /** 582 * mmc_wait_for_req - start a request and wait for completion 583 * @host: MMC host to start command 584 * @mrq: MMC request to start 585 * 586 * Start a new MMC custom command request for a host, and wait 587 * for the command to complete. Does not attempt to parse the 588 * response. 589 */ 590 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) 591 { 592 __mmc_start_req(host, mrq); 593 mmc_wait_for_req_done(host, mrq); 594 } 595 EXPORT_SYMBOL(mmc_wait_for_req); 596 597 /** 598 * mmc_interrupt_hpi - Issue for High priority Interrupt 599 * @card: the MMC card associated with the HPI transfer 600 * 601 * Issued High Priority Interrupt, and check for card status 602 * until out-of prg-state. 603 */ 604 int mmc_interrupt_hpi(struct mmc_card *card) 605 { 606 int err; 607 u32 status; 608 unsigned long prg_wait; 609 610 BUG_ON(!card); 611 612 if (!card->ext_csd.hpi_en) { 613 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host)); 614 return 1; 615 } 616 617 mmc_claim_host(card->host); 618 err = mmc_send_status(card, &status); 619 if (err) { 620 pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); 621 goto out; 622 } 623 624 switch (R1_CURRENT_STATE(status)) { 625 case R1_STATE_IDLE: 626 case R1_STATE_READY: 627 case R1_STATE_STBY: 628 case R1_STATE_TRAN: 629 /* 630 * In idle and transfer states, HPI is not needed and the caller 631 * can issue the next intended command immediately 632 */ 633 goto out; 634 case R1_STATE_PRG: 635 break; 636 default: 637 /* In all other states, it's illegal to issue HPI */ 638 pr_debug("%s: HPI cannot be sent. Card state=%d\n", 639 mmc_hostname(card->host), R1_CURRENT_STATE(status)); 640 err = -EINVAL; 641 goto out; 642 } 643 644 err = mmc_send_hpi_cmd(card, &status); 645 if (err) 646 goto out; 647 648 prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time); 649 do { 650 err = mmc_send_status(card, &status); 651 652 if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN) 653 break; 654 if (time_after(jiffies, prg_wait)) 655 err = -ETIMEDOUT; 656 } while (!err); 657 658 out: 659 mmc_release_host(card->host); 660 return err; 661 } 662 EXPORT_SYMBOL(mmc_interrupt_hpi); 663 664 /** 665 * mmc_wait_for_cmd - start a command and wait for completion 666 * @host: MMC host to start command 667 * @cmd: MMC command to start 668 * @retries: maximum number of retries 669 * 670 * Start a new MMC command for a host, and wait for the command 671 * to complete. Return any error that occurred while the command 672 * was executing. Do not attempt to parse the response. 673 */ 674 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) 675 { 676 struct mmc_request mrq = {NULL}; 677 678 WARN_ON(!host->claimed); 679 680 memset(cmd->resp, 0, sizeof(cmd->resp)); 681 cmd->retries = retries; 682 683 mrq.cmd = cmd; 684 cmd->data = NULL; 685 686 mmc_wait_for_req(host, &mrq); 687 688 return cmd->error; 689 } 690 691 EXPORT_SYMBOL(mmc_wait_for_cmd); 692 693 /** 694 * mmc_stop_bkops - stop ongoing BKOPS 695 * @card: MMC card to check BKOPS 696 * 697 * Send HPI command to stop ongoing background operations to 698 * allow rapid servicing of foreground operations, e.g. read/ 699 * writes. Wait until the card comes out of the programming state 700 * to avoid errors in servicing read/write requests. 701 */ 702 int mmc_stop_bkops(struct mmc_card *card) 703 { 704 int err = 0; 705 706 BUG_ON(!card); 707 err = mmc_interrupt_hpi(card); 708 709 /* 710 * If err is EINVAL, we can't issue an HPI. 711 * It should complete the BKOPS. 712 */ 713 if (!err || (err == -EINVAL)) { 714 mmc_card_clr_doing_bkops(card); 715 err = 0; 716 } 717 718 return err; 719 } 720 EXPORT_SYMBOL(mmc_stop_bkops); 721 722 int mmc_read_bkops_status(struct mmc_card *card) 723 { 724 int err; 725 u8 *ext_csd; 726 727 /* 728 * In future work, we should consider storing the entire ext_csd. 729 */ 730 ext_csd = kmalloc(512, GFP_KERNEL); 731 if (!ext_csd) { 732 pr_err("%s: could not allocate buffer to receive the ext_csd.\n", 733 mmc_hostname(card->host)); 734 return -ENOMEM; 735 } 736 737 mmc_claim_host(card->host); 738 err = mmc_send_ext_csd(card, ext_csd); 739 mmc_release_host(card->host); 740 if (err) 741 goto out; 742 743 card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS]; 744 card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS]; 745 out: 746 kfree(ext_csd); 747 return err; 748 } 749 EXPORT_SYMBOL(mmc_read_bkops_status); 750 751 /** 752 * mmc_set_data_timeout - set the timeout for a data command 753 * @data: data phase for command 754 * @card: the MMC card associated with the data transfer 755 * 756 * Computes the data timeout parameters according to the 757 * correct algorithm given the card type. 758 */ 759 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) 760 { 761 unsigned int mult; 762 763 /* 764 * SDIO cards only define an upper 1 s limit on access. 765 */ 766 if (mmc_card_sdio(card)) { 767 data->timeout_ns = 1000000000; 768 data->timeout_clks = 0; 769 return; 770 } 771 772 /* 773 * SD cards use a 100 multiplier rather than 10 774 */ 775 mult = mmc_card_sd(card) ? 100 : 10; 776 777 /* 778 * Scale up the multiplier (and therefore the timeout) by 779 * the r2w factor for writes. 780 */ 781 if (data->flags & MMC_DATA_WRITE) 782 mult <<= card->csd.r2w_factor; 783 784 data->timeout_ns = card->csd.tacc_ns * mult; 785 data->timeout_clks = card->csd.tacc_clks * mult; 786 787 /* 788 * SD cards also have an upper limit on the timeout. 789 */ 790 if (mmc_card_sd(card)) { 791 unsigned int timeout_us, limit_us; 792 793 timeout_us = data->timeout_ns / 1000; 794 if (mmc_host_clk_rate(card->host)) 795 timeout_us += data->timeout_clks * 1000 / 796 (mmc_host_clk_rate(card->host) / 1000); 797 798 if (data->flags & MMC_DATA_WRITE) 799 /* 800 * The MMC spec "It is strongly recommended 801 * for hosts to implement more than 500ms 802 * timeout value even if the card indicates 803 * the 250ms maximum busy length." Even the 804 * previous value of 300ms is known to be 805 * insufficient for some cards. 806 */ 807 limit_us = 3000000; 808 else 809 limit_us = 100000; 810 811 /* 812 * SDHC cards always use these fixed values. 813 */ 814 if (timeout_us > limit_us || mmc_card_blockaddr(card)) { 815 data->timeout_ns = limit_us * 1000; 816 data->timeout_clks = 0; 817 } 818 } 819 820 /* 821 * Some cards require longer data read timeout than indicated in CSD. 822 * Address this by setting the read timeout to a "reasonably high" 823 * value. For the cards tested, 300ms has proven enough. If necessary, 824 * this value can be increased if other problematic cards require this. 825 */ 826 if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) { 827 data->timeout_ns = 300000000; 828 data->timeout_clks = 0; 829 } 830 831 /* 832 * Some cards need very high timeouts if driven in SPI mode. 833 * The worst observed timeout was 900ms after writing a 834 * continuous stream of data until the internal logic 835 * overflowed. 836 */ 837 if (mmc_host_is_spi(card->host)) { 838 if (data->flags & MMC_DATA_WRITE) { 839 if (data->timeout_ns < 1000000000) 840 data->timeout_ns = 1000000000; /* 1s */ 841 } else { 842 if (data->timeout_ns < 100000000) 843 data->timeout_ns = 100000000; /* 100ms */ 844 } 845 } 846 } 847 EXPORT_SYMBOL(mmc_set_data_timeout); 848 849 /** 850 * mmc_align_data_size - pads a transfer size to a more optimal value 851 * @card: the MMC card associated with the data transfer 852 * @sz: original transfer size 853 * 854 * Pads the original data size with a number of extra bytes in 855 * order to avoid controller bugs and/or performance hits 856 * (e.g. some controllers revert to PIO for certain sizes). 857 * 858 * Returns the improved size, which might be unmodified. 859 * 860 * Note that this function is only relevant when issuing a 861 * single scatter gather entry. 862 */ 863 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) 864 { 865 /* 866 * FIXME: We don't have a system for the controller to tell 867 * the core about its problems yet, so for now we just 32-bit 868 * align the size. 869 */ 870 sz = ((sz + 3) / 4) * 4; 871 872 return sz; 873 } 874 EXPORT_SYMBOL(mmc_align_data_size); 875 876 /** 877 * __mmc_claim_host - exclusively claim a host 878 * @host: mmc host to claim 879 * @abort: whether or not the operation should be aborted 880 * 881 * Claim a host for a set of operations. If @abort is non null and 882 * dereference a non-zero value then this will return prematurely with 883 * that non-zero value without acquiring the lock. Returns zero 884 * with the lock held otherwise. 885 */ 886 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) 887 { 888 DECLARE_WAITQUEUE(wait, current); 889 unsigned long flags; 890 int stop; 891 892 might_sleep(); 893 894 add_wait_queue(&host->wq, &wait); 895 spin_lock_irqsave(&host->lock, flags); 896 while (1) { 897 set_current_state(TASK_UNINTERRUPTIBLE); 898 stop = abort ? atomic_read(abort) : 0; 899 if (stop || !host->claimed || host->claimer == current) 900 break; 901 spin_unlock_irqrestore(&host->lock, flags); 902 schedule(); 903 spin_lock_irqsave(&host->lock, flags); 904 } 905 set_current_state(TASK_RUNNING); 906 if (!stop) { 907 host->claimed = 1; 908 host->claimer = current; 909 host->claim_cnt += 1; 910 } else 911 wake_up(&host->wq); 912 spin_unlock_irqrestore(&host->lock, flags); 913 remove_wait_queue(&host->wq, &wait); 914 if (host->ops->enable && !stop && host->claim_cnt == 1) 915 host->ops->enable(host); 916 return stop; 917 } 918 919 EXPORT_SYMBOL(__mmc_claim_host); 920 921 /** 922 * mmc_release_host - release a host 923 * @host: mmc host to release 924 * 925 * Release a MMC host, allowing others to claim the host 926 * for their operations. 927 */ 928 void mmc_release_host(struct mmc_host *host) 929 { 930 unsigned long flags; 931 932 WARN_ON(!host->claimed); 933 934 if (host->ops->disable && host->claim_cnt == 1) 935 host->ops->disable(host); 936 937 spin_lock_irqsave(&host->lock, flags); 938 if (--host->claim_cnt) { 939 /* Release for nested claim */ 940 spin_unlock_irqrestore(&host->lock, flags); 941 } else { 942 host->claimed = 0; 943 host->claimer = NULL; 944 spin_unlock_irqrestore(&host->lock, flags); 945 wake_up(&host->wq); 946 } 947 } 948 EXPORT_SYMBOL(mmc_release_host); 949 950 /* 951 * This is a helper function, which fetches a runtime pm reference for the 952 * card device and also claims the host. 953 */ 954 void mmc_get_card(struct mmc_card *card) 955 { 956 pm_runtime_get_sync(&card->dev); 957 mmc_claim_host(card->host); 958 } 959 EXPORT_SYMBOL(mmc_get_card); 960 961 /* 962 * This is a helper function, which releases the host and drops the runtime 963 * pm reference for the card device. 964 */ 965 void mmc_put_card(struct mmc_card *card) 966 { 967 mmc_release_host(card->host); 968 pm_runtime_mark_last_busy(&card->dev); 969 pm_runtime_put_autosuspend(&card->dev); 970 } 971 EXPORT_SYMBOL(mmc_put_card); 972 973 /* 974 * Internal function that does the actual ios call to the host driver, 975 * optionally printing some debug output. 976 */ 977 static inline void mmc_set_ios(struct mmc_host *host) 978 { 979 struct mmc_ios *ios = &host->ios; 980 981 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u " 982 "width %u timing %u\n", 983 mmc_hostname(host), ios->clock, ios->bus_mode, 984 ios->power_mode, ios->chip_select, ios->vdd, 985 ios->bus_width, ios->timing); 986 987 if (ios->clock > 0) 988 mmc_set_ungated(host); 989 host->ops->set_ios(host, ios); 990 } 991 992 /* 993 * Control chip select pin on a host. 994 */ 995 void mmc_set_chip_select(struct mmc_host *host, int mode) 996 { 997 mmc_host_clk_hold(host); 998 host->ios.chip_select = mode; 999 mmc_set_ios(host); 1000 mmc_host_clk_release(host); 1001 } 1002 1003 /* 1004 * Sets the host clock to the highest possible frequency that 1005 * is below "hz". 1006 */ 1007 static void __mmc_set_clock(struct mmc_host *host, unsigned int hz) 1008 { 1009 WARN_ON(hz < host->f_min); 1010 1011 if (hz > host->f_max) 1012 hz = host->f_max; 1013 1014 host->ios.clock = hz; 1015 mmc_set_ios(host); 1016 } 1017 1018 void mmc_set_clock(struct mmc_host *host, unsigned int hz) 1019 { 1020 mmc_host_clk_hold(host); 1021 __mmc_set_clock(host, hz); 1022 mmc_host_clk_release(host); 1023 } 1024 1025 #ifdef CONFIG_MMC_CLKGATE 1026 /* 1027 * This gates the clock by setting it to 0 Hz. 1028 */ 1029 void mmc_gate_clock(struct mmc_host *host) 1030 { 1031 unsigned long flags; 1032 1033 spin_lock_irqsave(&host->clk_lock, flags); 1034 host->clk_old = host->ios.clock; 1035 host->ios.clock = 0; 1036 host->clk_gated = true; 1037 spin_unlock_irqrestore(&host->clk_lock, flags); 1038 mmc_set_ios(host); 1039 } 1040 1041 /* 1042 * This restores the clock from gating by using the cached 1043 * clock value. 1044 */ 1045 void mmc_ungate_clock(struct mmc_host *host) 1046 { 1047 /* 1048 * We should previously have gated the clock, so the clock shall 1049 * be 0 here! The clock may however be 0 during initialization, 1050 * when some request operations are performed before setting 1051 * the frequency. When ungate is requested in that situation 1052 * we just ignore the call. 1053 */ 1054 if (host->clk_old) { 1055 BUG_ON(host->ios.clock); 1056 /* This call will also set host->clk_gated to false */ 1057 __mmc_set_clock(host, host->clk_old); 1058 } 1059 } 1060 1061 void mmc_set_ungated(struct mmc_host *host) 1062 { 1063 unsigned long flags; 1064 1065 /* 1066 * We've been given a new frequency while the clock is gated, 1067 * so make sure we regard this as ungating it. 1068 */ 1069 spin_lock_irqsave(&host->clk_lock, flags); 1070 host->clk_gated = false; 1071 spin_unlock_irqrestore(&host->clk_lock, flags); 1072 } 1073 1074 #else 1075 void mmc_set_ungated(struct mmc_host *host) 1076 { 1077 } 1078 #endif 1079 1080 /* 1081 * Change the bus mode (open drain/push-pull) of a host. 1082 */ 1083 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) 1084 { 1085 mmc_host_clk_hold(host); 1086 host->ios.bus_mode = mode; 1087 mmc_set_ios(host); 1088 mmc_host_clk_release(host); 1089 } 1090 1091 /* 1092 * Change data bus width of a host. 1093 */ 1094 void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 1095 { 1096 mmc_host_clk_hold(host); 1097 host->ios.bus_width = width; 1098 mmc_set_ios(host); 1099 mmc_host_clk_release(host); 1100 } 1101 1102 /** 1103 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number 1104 * @vdd: voltage (mV) 1105 * @low_bits: prefer low bits in boundary cases 1106 * 1107 * This function returns the OCR bit number according to the provided @vdd 1108 * value. If conversion is not possible a negative errno value returned. 1109 * 1110 * Depending on the @low_bits flag the function prefers low or high OCR bits 1111 * on boundary voltages. For example, 1112 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33); 1113 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34); 1114 * 1115 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21). 1116 */ 1117 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits) 1118 { 1119 const int max_bit = ilog2(MMC_VDD_35_36); 1120 int bit; 1121 1122 if (vdd < 1650 || vdd > 3600) 1123 return -EINVAL; 1124 1125 if (vdd >= 1650 && vdd <= 1950) 1126 return ilog2(MMC_VDD_165_195); 1127 1128 if (low_bits) 1129 vdd -= 1; 1130 1131 /* Base 2000 mV, step 100 mV, bit's base 8. */ 1132 bit = (vdd - 2000) / 100 + 8; 1133 if (bit > max_bit) 1134 return max_bit; 1135 return bit; 1136 } 1137 1138 /** 1139 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask 1140 * @vdd_min: minimum voltage value (mV) 1141 * @vdd_max: maximum voltage value (mV) 1142 * 1143 * This function returns the OCR mask bits according to the provided @vdd_min 1144 * and @vdd_max values. If conversion is not possible the function returns 0. 1145 * 1146 * Notes wrt boundary cases: 1147 * This function sets the OCR bits for all boundary voltages, for example 1148 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 | 1149 * MMC_VDD_34_35 mask. 1150 */ 1151 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max) 1152 { 1153 u32 mask = 0; 1154 1155 if (vdd_max < vdd_min) 1156 return 0; 1157 1158 /* Prefer high bits for the boundary vdd_max values. */ 1159 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false); 1160 if (vdd_max < 0) 1161 return 0; 1162 1163 /* Prefer low bits for the boundary vdd_min values. */ 1164 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true); 1165 if (vdd_min < 0) 1166 return 0; 1167 1168 /* Fill the mask, from max bit to min bit. */ 1169 while (vdd_max >= vdd_min) 1170 mask |= 1 << vdd_max--; 1171 1172 return mask; 1173 } 1174 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask); 1175 1176 #ifdef CONFIG_OF 1177 1178 /** 1179 * mmc_of_parse_voltage - return mask of supported voltages 1180 * @np: The device node need to be parsed. 1181 * @mask: mask of voltages available for MMC/SD/SDIO 1182 * 1183 * 1. Return zero on success. 1184 * 2. Return negative errno: voltage-range is invalid. 1185 */ 1186 int mmc_of_parse_voltage(struct device_node *np, u32 *mask) 1187 { 1188 const u32 *voltage_ranges; 1189 int num_ranges, i; 1190 1191 voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges); 1192 num_ranges = num_ranges / sizeof(*voltage_ranges) / 2; 1193 if (!voltage_ranges || !num_ranges) { 1194 pr_info("%s: voltage-ranges unspecified\n", np->full_name); 1195 return -EINVAL; 1196 } 1197 1198 for (i = 0; i < num_ranges; i++) { 1199 const int j = i * 2; 1200 u32 ocr_mask; 1201 1202 ocr_mask = mmc_vddrange_to_ocrmask( 1203 be32_to_cpu(voltage_ranges[j]), 1204 be32_to_cpu(voltage_ranges[j + 1])); 1205 if (!ocr_mask) { 1206 pr_err("%s: voltage-range #%d is invalid\n", 1207 np->full_name, i); 1208 return -EINVAL; 1209 } 1210 *mask |= ocr_mask; 1211 } 1212 1213 return 0; 1214 } 1215 EXPORT_SYMBOL(mmc_of_parse_voltage); 1216 1217 #endif /* CONFIG_OF */ 1218 1219 #ifdef CONFIG_REGULATOR 1220 1221 /** 1222 * mmc_regulator_get_ocrmask - return mask of supported voltages 1223 * @supply: regulator to use 1224 * 1225 * This returns either a negative errno, or a mask of voltages that 1226 * can be provided to MMC/SD/SDIO devices using the specified voltage 1227 * regulator. This would normally be called before registering the 1228 * MMC host adapter. 1229 */ 1230 int mmc_regulator_get_ocrmask(struct regulator *supply) 1231 { 1232 int result = 0; 1233 int count; 1234 int i; 1235 1236 count = regulator_count_voltages(supply); 1237 if (count < 0) 1238 return count; 1239 1240 for (i = 0; i < count; i++) { 1241 int vdd_uV; 1242 int vdd_mV; 1243 1244 vdd_uV = regulator_list_voltage(supply, i); 1245 if (vdd_uV <= 0) 1246 continue; 1247 1248 vdd_mV = vdd_uV / 1000; 1249 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); 1250 } 1251 1252 return result; 1253 } 1254 EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask); 1255 1256 /** 1257 * mmc_regulator_set_ocr - set regulator to match host->ios voltage 1258 * @mmc: the host to regulate 1259 * @supply: regulator to use 1260 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd) 1261 * 1262 * Returns zero on success, else negative errno. 1263 * 1264 * MMC host drivers may use this to enable or disable a regulator using 1265 * a particular supply voltage. This would normally be called from the 1266 * set_ios() method. 1267 */ 1268 int mmc_regulator_set_ocr(struct mmc_host *mmc, 1269 struct regulator *supply, 1270 unsigned short vdd_bit) 1271 { 1272 int result = 0; 1273 int min_uV, max_uV; 1274 1275 if (vdd_bit) { 1276 int tmp; 1277 int voltage; 1278 1279 /* 1280 * REVISIT mmc_vddrange_to_ocrmask() may have set some 1281 * bits this regulator doesn't quite support ... don't 1282 * be too picky, most cards and regulators are OK with 1283 * a 0.1V range goof (it's a small error percentage). 1284 */ 1285 tmp = vdd_bit - ilog2(MMC_VDD_165_195); 1286 if (tmp == 0) { 1287 min_uV = 1650 * 1000; 1288 max_uV = 1950 * 1000; 1289 } else { 1290 min_uV = 1900 * 1000 + tmp * 100 * 1000; 1291 max_uV = min_uV + 100 * 1000; 1292 } 1293 1294 /* 1295 * If we're using a fixed/static regulator, don't call 1296 * regulator_set_voltage; it would fail. 1297 */ 1298 voltage = regulator_get_voltage(supply); 1299 1300 if (!regulator_can_change_voltage(supply)) 1301 min_uV = max_uV = voltage; 1302 1303 if (voltage < 0) 1304 result = voltage; 1305 else if (voltage < min_uV || voltage > max_uV) 1306 result = regulator_set_voltage(supply, min_uV, max_uV); 1307 else 1308 result = 0; 1309 1310 if (result == 0 && !mmc->regulator_enabled) { 1311 result = regulator_enable(supply); 1312 if (!result) 1313 mmc->regulator_enabled = true; 1314 } 1315 } else if (mmc->regulator_enabled) { 1316 result = regulator_disable(supply); 1317 if (result == 0) 1318 mmc->regulator_enabled = false; 1319 } 1320 1321 if (result) 1322 dev_err(mmc_dev(mmc), 1323 "could not set regulator OCR (%d)\n", result); 1324 return result; 1325 } 1326 EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr); 1327 1328 int mmc_regulator_get_supply(struct mmc_host *mmc) 1329 { 1330 struct device *dev = mmc_dev(mmc); 1331 struct regulator *supply; 1332 int ret; 1333 1334 supply = devm_regulator_get(dev, "vmmc"); 1335 mmc->supply.vmmc = supply; 1336 mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc"); 1337 1338 if (IS_ERR(supply)) 1339 return PTR_ERR(supply); 1340 1341 ret = mmc_regulator_get_ocrmask(supply); 1342 if (ret > 0) 1343 mmc->ocr_avail = ret; 1344 else 1345 dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret); 1346 1347 return 0; 1348 } 1349 EXPORT_SYMBOL_GPL(mmc_regulator_get_supply); 1350 1351 #endif /* CONFIG_REGULATOR */ 1352 1353 /* 1354 * Mask off any voltages we don't support and select 1355 * the lowest voltage 1356 */ 1357 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) 1358 { 1359 int bit; 1360 1361 /* 1362 * Sanity check the voltages that the card claims to 1363 * support. 1364 */ 1365 if (ocr & 0x7F) { 1366 dev_warn(mmc_dev(host), 1367 "card claims to support voltages below defined range\n"); 1368 ocr &= ~0x7F; 1369 } 1370 1371 ocr &= host->ocr_avail; 1372 if (!ocr) { 1373 dev_warn(mmc_dev(host), "no support for card's volts\n"); 1374 return 0; 1375 } 1376 1377 if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) { 1378 bit = ffs(ocr) - 1; 1379 ocr &= 3 << bit; 1380 mmc_power_cycle(host, ocr); 1381 } else { 1382 bit = fls(ocr) - 1; 1383 ocr &= 3 << bit; 1384 if (bit != host->ios.vdd) 1385 dev_warn(mmc_dev(host), "exceeding card's volts\n"); 1386 } 1387 1388 return ocr; 1389 } 1390 1391 int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage) 1392 { 1393 int err = 0; 1394 int old_signal_voltage = host->ios.signal_voltage; 1395 1396 host->ios.signal_voltage = signal_voltage; 1397 if (host->ops->start_signal_voltage_switch) { 1398 mmc_host_clk_hold(host); 1399 err = host->ops->start_signal_voltage_switch(host, &host->ios); 1400 mmc_host_clk_release(host); 1401 } 1402 1403 if (err) 1404 host->ios.signal_voltage = old_signal_voltage; 1405 1406 return err; 1407 1408 } 1409 1410 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr) 1411 { 1412 struct mmc_command cmd = {0}; 1413 int err = 0; 1414 u32 clock; 1415 1416 BUG_ON(!host); 1417 1418 /* 1419 * Send CMD11 only if the request is to switch the card to 1420 * 1.8V signalling. 1421 */ 1422 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) 1423 return __mmc_set_signal_voltage(host, signal_voltage); 1424 1425 /* 1426 * If we cannot switch voltages, return failure so the caller 1427 * can continue without UHS mode 1428 */ 1429 if (!host->ops->start_signal_voltage_switch) 1430 return -EPERM; 1431 if (!host->ops->card_busy) 1432 pr_warning("%s: cannot verify signal voltage switch\n", 1433 mmc_hostname(host)); 1434 1435 cmd.opcode = SD_SWITCH_VOLTAGE; 1436 cmd.arg = 0; 1437 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 1438 1439 err = mmc_wait_for_cmd(host, &cmd, 0); 1440 if (err) 1441 return err; 1442 1443 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) 1444 return -EIO; 1445 1446 mmc_host_clk_hold(host); 1447 /* 1448 * The card should drive cmd and dat[0:3] low immediately 1449 * after the response of cmd11, but wait 1 ms to be sure 1450 */ 1451 mmc_delay(1); 1452 if (host->ops->card_busy && !host->ops->card_busy(host)) { 1453 err = -EAGAIN; 1454 goto power_cycle; 1455 } 1456 /* 1457 * During a signal voltage level switch, the clock must be gated 1458 * for 5 ms according to the SD spec 1459 */ 1460 clock = host->ios.clock; 1461 host->ios.clock = 0; 1462 mmc_set_ios(host); 1463 1464 if (__mmc_set_signal_voltage(host, signal_voltage)) { 1465 /* 1466 * Voltages may not have been switched, but we've already 1467 * sent CMD11, so a power cycle is required anyway 1468 */ 1469 err = -EAGAIN; 1470 goto power_cycle; 1471 } 1472 1473 /* Keep clock gated for at least 5 ms */ 1474 mmc_delay(5); 1475 host->ios.clock = clock; 1476 mmc_set_ios(host); 1477 1478 /* Wait for at least 1 ms according to spec */ 1479 mmc_delay(1); 1480 1481 /* 1482 * Failure to switch is indicated by the card holding 1483 * dat[0:3] low 1484 */ 1485 if (host->ops->card_busy && host->ops->card_busy(host)) 1486 err = -EAGAIN; 1487 1488 power_cycle: 1489 if (err) { 1490 pr_debug("%s: Signal voltage switch failed, " 1491 "power cycling card\n", mmc_hostname(host)); 1492 mmc_power_cycle(host, ocr); 1493 } 1494 1495 mmc_host_clk_release(host); 1496 1497 return err; 1498 } 1499 1500 /* 1501 * Select timing parameters for host. 1502 */ 1503 void mmc_set_timing(struct mmc_host *host, unsigned int timing) 1504 { 1505 mmc_host_clk_hold(host); 1506 host->ios.timing = timing; 1507 mmc_set_ios(host); 1508 mmc_host_clk_release(host); 1509 } 1510 1511 /* 1512 * Select appropriate driver type for host. 1513 */ 1514 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) 1515 { 1516 mmc_host_clk_hold(host); 1517 host->ios.drv_type = drv_type; 1518 mmc_set_ios(host); 1519 mmc_host_clk_release(host); 1520 } 1521 1522 /* 1523 * Apply power to the MMC stack. This is a two-stage process. 1524 * First, we enable power to the card without the clock running. 1525 * We then wait a bit for the power to stabilise. Finally, 1526 * enable the bus drivers and clock to the card. 1527 * 1528 * We must _NOT_ enable the clock prior to power stablising. 1529 * 1530 * If a host does all the power sequencing itself, ignore the 1531 * initial MMC_POWER_UP stage. 1532 */ 1533 void mmc_power_up(struct mmc_host *host, u32 ocr) 1534 { 1535 if (host->ios.power_mode == MMC_POWER_ON) 1536 return; 1537 1538 mmc_host_clk_hold(host); 1539 1540 host->ios.vdd = fls(ocr) - 1; 1541 if (mmc_host_is_spi(host)) 1542 host->ios.chip_select = MMC_CS_HIGH; 1543 else 1544 host->ios.chip_select = MMC_CS_DONTCARE; 1545 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; 1546 host->ios.power_mode = MMC_POWER_UP; 1547 host->ios.bus_width = MMC_BUS_WIDTH_1; 1548 host->ios.timing = MMC_TIMING_LEGACY; 1549 mmc_set_ios(host); 1550 1551 /* Set signal voltage to 3.3V */ 1552 __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330); 1553 1554 /* 1555 * This delay should be sufficient to allow the power supply 1556 * to reach the minimum voltage. 1557 */ 1558 mmc_delay(10); 1559 1560 host->ios.clock = host->f_init; 1561 1562 host->ios.power_mode = MMC_POWER_ON; 1563 mmc_set_ios(host); 1564 1565 /* 1566 * This delay must be at least 74 clock sizes, or 1 ms, or the 1567 * time required to reach a stable voltage. 1568 */ 1569 mmc_delay(10); 1570 1571 mmc_host_clk_release(host); 1572 } 1573 1574 void mmc_power_off(struct mmc_host *host) 1575 { 1576 if (host->ios.power_mode == MMC_POWER_OFF) 1577 return; 1578 1579 mmc_host_clk_hold(host); 1580 1581 host->ios.clock = 0; 1582 host->ios.vdd = 0; 1583 1584 if (!mmc_host_is_spi(host)) { 1585 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 1586 host->ios.chip_select = MMC_CS_DONTCARE; 1587 } 1588 host->ios.power_mode = MMC_POWER_OFF; 1589 host->ios.bus_width = MMC_BUS_WIDTH_1; 1590 host->ios.timing = MMC_TIMING_LEGACY; 1591 mmc_set_ios(host); 1592 1593 /* 1594 * Some configurations, such as the 802.11 SDIO card in the OLPC 1595 * XO-1.5, require a short delay after poweroff before the card 1596 * can be successfully turned on again. 1597 */ 1598 mmc_delay(1); 1599 1600 mmc_host_clk_release(host); 1601 } 1602 1603 void mmc_power_cycle(struct mmc_host *host, u32 ocr) 1604 { 1605 mmc_power_off(host); 1606 /* Wait at least 1 ms according to SD spec */ 1607 mmc_delay(1); 1608 mmc_power_up(host, ocr); 1609 } 1610 1611 /* 1612 * Cleanup when the last reference to the bus operator is dropped. 1613 */ 1614 static void __mmc_release_bus(struct mmc_host *host) 1615 { 1616 BUG_ON(!host); 1617 BUG_ON(host->bus_refs); 1618 BUG_ON(!host->bus_dead); 1619 1620 host->bus_ops = NULL; 1621 } 1622 1623 /* 1624 * Increase reference count of bus operator 1625 */ 1626 static inline void mmc_bus_get(struct mmc_host *host) 1627 { 1628 unsigned long flags; 1629 1630 spin_lock_irqsave(&host->lock, flags); 1631 host->bus_refs++; 1632 spin_unlock_irqrestore(&host->lock, flags); 1633 } 1634 1635 /* 1636 * Decrease reference count of bus operator and free it if 1637 * it is the last reference. 1638 */ 1639 static inline void mmc_bus_put(struct mmc_host *host) 1640 { 1641 unsigned long flags; 1642 1643 spin_lock_irqsave(&host->lock, flags); 1644 host->bus_refs--; 1645 if ((host->bus_refs == 0) && host->bus_ops) 1646 __mmc_release_bus(host); 1647 spin_unlock_irqrestore(&host->lock, flags); 1648 } 1649 1650 /* 1651 * Assign a mmc bus handler to a host. Only one bus handler may control a 1652 * host at any given time. 1653 */ 1654 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops) 1655 { 1656 unsigned long flags; 1657 1658 BUG_ON(!host); 1659 BUG_ON(!ops); 1660 1661 WARN_ON(!host->claimed); 1662 1663 spin_lock_irqsave(&host->lock, flags); 1664 1665 BUG_ON(host->bus_ops); 1666 BUG_ON(host->bus_refs); 1667 1668 host->bus_ops = ops; 1669 host->bus_refs = 1; 1670 host->bus_dead = 0; 1671 1672 spin_unlock_irqrestore(&host->lock, flags); 1673 } 1674 1675 /* 1676 * Remove the current bus handler from a host. 1677 */ 1678 void mmc_detach_bus(struct mmc_host *host) 1679 { 1680 unsigned long flags; 1681 1682 BUG_ON(!host); 1683 1684 WARN_ON(!host->claimed); 1685 WARN_ON(!host->bus_ops); 1686 1687 spin_lock_irqsave(&host->lock, flags); 1688 1689 host->bus_dead = 1; 1690 1691 spin_unlock_irqrestore(&host->lock, flags); 1692 1693 mmc_bus_put(host); 1694 } 1695 1696 static void _mmc_detect_change(struct mmc_host *host, unsigned long delay, 1697 bool cd_irq) 1698 { 1699 #ifdef CONFIG_MMC_DEBUG 1700 unsigned long flags; 1701 spin_lock_irqsave(&host->lock, flags); 1702 WARN_ON(host->removed); 1703 spin_unlock_irqrestore(&host->lock, flags); 1704 #endif 1705 1706 /* 1707 * If the device is configured as wakeup, we prevent a new sleep for 1708 * 5 s to give provision for user space to consume the event. 1709 */ 1710 if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) && 1711 device_can_wakeup(mmc_dev(host))) 1712 pm_wakeup_event(mmc_dev(host), 5000); 1713 1714 host->detect_change = 1; 1715 mmc_schedule_delayed_work(&host->detect, delay); 1716 } 1717 1718 /** 1719 * mmc_detect_change - process change of state on a MMC socket 1720 * @host: host which changed state. 1721 * @delay: optional delay to wait before detection (jiffies) 1722 * 1723 * MMC drivers should call this when they detect a card has been 1724 * inserted or removed. The MMC layer will confirm that any 1725 * present card is still functional, and initialize any newly 1726 * inserted. 1727 */ 1728 void mmc_detect_change(struct mmc_host *host, unsigned long delay) 1729 { 1730 _mmc_detect_change(host, delay, true); 1731 } 1732 EXPORT_SYMBOL(mmc_detect_change); 1733 1734 void mmc_init_erase(struct mmc_card *card) 1735 { 1736 unsigned int sz; 1737 1738 if (is_power_of_2(card->erase_size)) 1739 card->erase_shift = ffs(card->erase_size) - 1; 1740 else 1741 card->erase_shift = 0; 1742 1743 /* 1744 * It is possible to erase an arbitrarily large area of an SD or MMC 1745 * card. That is not desirable because it can take a long time 1746 * (minutes) potentially delaying more important I/O, and also the 1747 * timeout calculations become increasingly hugely over-estimated. 1748 * Consequently, 'pref_erase' is defined as a guide to limit erases 1749 * to that size and alignment. 1750 * 1751 * For SD cards that define Allocation Unit size, limit erases to one 1752 * Allocation Unit at a time. For MMC cards that define High Capacity 1753 * Erase Size, whether it is switched on or not, limit to that size. 1754 * Otherwise just have a stab at a good value. For modern cards it 1755 * will end up being 4MiB. Note that if the value is too small, it 1756 * can end up taking longer to erase. 1757 */ 1758 if (mmc_card_sd(card) && card->ssr.au) { 1759 card->pref_erase = card->ssr.au; 1760 card->erase_shift = ffs(card->ssr.au) - 1; 1761 } else if (card->ext_csd.hc_erase_size) { 1762 card->pref_erase = card->ext_csd.hc_erase_size; 1763 } else { 1764 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11; 1765 if (sz < 128) 1766 card->pref_erase = 512 * 1024 / 512; 1767 else if (sz < 512) 1768 card->pref_erase = 1024 * 1024 / 512; 1769 else if (sz < 1024) 1770 card->pref_erase = 2 * 1024 * 1024 / 512; 1771 else 1772 card->pref_erase = 4 * 1024 * 1024 / 512; 1773 if (card->pref_erase < card->erase_size) 1774 card->pref_erase = card->erase_size; 1775 else { 1776 sz = card->pref_erase % card->erase_size; 1777 if (sz) 1778 card->pref_erase += card->erase_size - sz; 1779 } 1780 } 1781 } 1782 1783 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, 1784 unsigned int arg, unsigned int qty) 1785 { 1786 unsigned int erase_timeout; 1787 1788 if (arg == MMC_DISCARD_ARG || 1789 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) { 1790 erase_timeout = card->ext_csd.trim_timeout; 1791 } else if (card->ext_csd.erase_group_def & 1) { 1792 /* High Capacity Erase Group Size uses HC timeouts */ 1793 if (arg == MMC_TRIM_ARG) 1794 erase_timeout = card->ext_csd.trim_timeout; 1795 else 1796 erase_timeout = card->ext_csd.hc_erase_timeout; 1797 } else { 1798 /* CSD Erase Group Size uses write timeout */ 1799 unsigned int mult = (10 << card->csd.r2w_factor); 1800 unsigned int timeout_clks = card->csd.tacc_clks * mult; 1801 unsigned int timeout_us; 1802 1803 /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */ 1804 if (card->csd.tacc_ns < 1000000) 1805 timeout_us = (card->csd.tacc_ns * mult) / 1000; 1806 else 1807 timeout_us = (card->csd.tacc_ns / 1000) * mult; 1808 1809 /* 1810 * ios.clock is only a target. The real clock rate might be 1811 * less but not that much less, so fudge it by multiplying by 2. 1812 */ 1813 timeout_clks <<= 1; 1814 timeout_us += (timeout_clks * 1000) / 1815 (mmc_host_clk_rate(card->host) / 1000); 1816 1817 erase_timeout = timeout_us / 1000; 1818 1819 /* 1820 * Theoretically, the calculation could underflow so round up 1821 * to 1ms in that case. 1822 */ 1823 if (!erase_timeout) 1824 erase_timeout = 1; 1825 } 1826 1827 /* Multiplier for secure operations */ 1828 if (arg & MMC_SECURE_ARGS) { 1829 if (arg == MMC_SECURE_ERASE_ARG) 1830 erase_timeout *= card->ext_csd.sec_erase_mult; 1831 else 1832 erase_timeout *= card->ext_csd.sec_trim_mult; 1833 } 1834 1835 erase_timeout *= qty; 1836 1837 /* 1838 * Ensure at least a 1 second timeout for SPI as per 1839 * 'mmc_set_data_timeout()' 1840 */ 1841 if (mmc_host_is_spi(card->host) && erase_timeout < 1000) 1842 erase_timeout = 1000; 1843 1844 return erase_timeout; 1845 } 1846 1847 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card, 1848 unsigned int arg, 1849 unsigned int qty) 1850 { 1851 unsigned int erase_timeout; 1852 1853 if (card->ssr.erase_timeout) { 1854 /* Erase timeout specified in SD Status Register (SSR) */ 1855 erase_timeout = card->ssr.erase_timeout * qty + 1856 card->ssr.erase_offset; 1857 } else { 1858 /* 1859 * Erase timeout not specified in SD Status Register (SSR) so 1860 * use 250ms per write block. 1861 */ 1862 erase_timeout = 250 * qty; 1863 } 1864 1865 /* Must not be less than 1 second */ 1866 if (erase_timeout < 1000) 1867 erase_timeout = 1000; 1868 1869 return erase_timeout; 1870 } 1871 1872 static unsigned int mmc_erase_timeout(struct mmc_card *card, 1873 unsigned int arg, 1874 unsigned int qty) 1875 { 1876 if (mmc_card_sd(card)) 1877 return mmc_sd_erase_timeout(card, arg, qty); 1878 else 1879 return mmc_mmc_erase_timeout(card, arg, qty); 1880 } 1881 1882 static int mmc_do_erase(struct mmc_card *card, unsigned int from, 1883 unsigned int to, unsigned int arg) 1884 { 1885 struct mmc_command cmd = {0}; 1886 unsigned int qty = 0; 1887 unsigned long timeout; 1888 int err; 1889 1890 /* 1891 * qty is used to calculate the erase timeout which depends on how many 1892 * erase groups (or allocation units in SD terminology) are affected. 1893 * We count erasing part of an erase group as one erase group. 1894 * For SD, the allocation units are always a power of 2. For MMC, the 1895 * erase group size is almost certainly also power of 2, but it does not 1896 * seem to insist on that in the JEDEC standard, so we fall back to 1897 * division in that case. SD may not specify an allocation unit size, 1898 * in which case the timeout is based on the number of write blocks. 1899 * 1900 * Note that the timeout for secure trim 2 will only be correct if the 1901 * number of erase groups specified is the same as the total of all 1902 * preceding secure trim 1 commands. Since the power may have been 1903 * lost since the secure trim 1 commands occurred, it is generally 1904 * impossible to calculate the secure trim 2 timeout correctly. 1905 */ 1906 if (card->erase_shift) 1907 qty += ((to >> card->erase_shift) - 1908 (from >> card->erase_shift)) + 1; 1909 else if (mmc_card_sd(card)) 1910 qty += to - from + 1; 1911 else 1912 qty += ((to / card->erase_size) - 1913 (from / card->erase_size)) + 1; 1914 1915 if (!mmc_card_blockaddr(card)) { 1916 from <<= 9; 1917 to <<= 9; 1918 } 1919 1920 if (mmc_card_sd(card)) 1921 cmd.opcode = SD_ERASE_WR_BLK_START; 1922 else 1923 cmd.opcode = MMC_ERASE_GROUP_START; 1924 cmd.arg = from; 1925 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1926 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1927 if (err) { 1928 pr_err("mmc_erase: group start error %d, " 1929 "status %#x\n", err, cmd.resp[0]); 1930 err = -EIO; 1931 goto out; 1932 } 1933 1934 memset(&cmd, 0, sizeof(struct mmc_command)); 1935 if (mmc_card_sd(card)) 1936 cmd.opcode = SD_ERASE_WR_BLK_END; 1937 else 1938 cmd.opcode = MMC_ERASE_GROUP_END; 1939 cmd.arg = to; 1940 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1941 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1942 if (err) { 1943 pr_err("mmc_erase: group end error %d, status %#x\n", 1944 err, cmd.resp[0]); 1945 err = -EIO; 1946 goto out; 1947 } 1948 1949 memset(&cmd, 0, sizeof(struct mmc_command)); 1950 cmd.opcode = MMC_ERASE; 1951 cmd.arg = arg; 1952 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 1953 cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty); 1954 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1955 if (err) { 1956 pr_err("mmc_erase: erase error %d, status %#x\n", 1957 err, cmd.resp[0]); 1958 err = -EIO; 1959 goto out; 1960 } 1961 1962 if (mmc_host_is_spi(card->host)) 1963 goto out; 1964 1965 timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS); 1966 do { 1967 memset(&cmd, 0, sizeof(struct mmc_command)); 1968 cmd.opcode = MMC_SEND_STATUS; 1969 cmd.arg = card->rca << 16; 1970 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 1971 /* Do not retry else we can't see errors */ 1972 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1973 if (err || (cmd.resp[0] & 0xFDF92000)) { 1974 pr_err("error %d requesting status %#x\n", 1975 err, cmd.resp[0]); 1976 err = -EIO; 1977 goto out; 1978 } 1979 1980 /* Timeout if the device never becomes ready for data and 1981 * never leaves the program state. 1982 */ 1983 if (time_after(jiffies, timeout)) { 1984 pr_err("%s: Card stuck in programming state! %s\n", 1985 mmc_hostname(card->host), __func__); 1986 err = -EIO; 1987 goto out; 1988 } 1989 1990 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || 1991 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG)); 1992 out: 1993 return err; 1994 } 1995 1996 /** 1997 * mmc_erase - erase sectors. 1998 * @card: card to erase 1999 * @from: first sector to erase 2000 * @nr: number of sectors to erase 2001 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG) 2002 * 2003 * Caller must claim host before calling this function. 2004 */ 2005 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, 2006 unsigned int arg) 2007 { 2008 unsigned int rem, to = from + nr; 2009 2010 if (!(card->host->caps & MMC_CAP_ERASE) || 2011 !(card->csd.cmdclass & CCC_ERASE)) 2012 return -EOPNOTSUPP; 2013 2014 if (!card->erase_size) 2015 return -EOPNOTSUPP; 2016 2017 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG) 2018 return -EOPNOTSUPP; 2019 2020 if ((arg & MMC_SECURE_ARGS) && 2021 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)) 2022 return -EOPNOTSUPP; 2023 2024 if ((arg & MMC_TRIM_ARGS) && 2025 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)) 2026 return -EOPNOTSUPP; 2027 2028 if (arg == MMC_SECURE_ERASE_ARG) { 2029 if (from % card->erase_size || nr % card->erase_size) 2030 return -EINVAL; 2031 } 2032 2033 if (arg == MMC_ERASE_ARG) { 2034 rem = from % card->erase_size; 2035 if (rem) { 2036 rem = card->erase_size - rem; 2037 from += rem; 2038 if (nr > rem) 2039 nr -= rem; 2040 else 2041 return 0; 2042 } 2043 rem = nr % card->erase_size; 2044 if (rem) 2045 nr -= rem; 2046 } 2047 2048 if (nr == 0) 2049 return 0; 2050 2051 to = from + nr; 2052 2053 if (to <= from) 2054 return -EINVAL; 2055 2056 /* 'from' and 'to' are inclusive */ 2057 to -= 1; 2058 2059 return mmc_do_erase(card, from, to, arg); 2060 } 2061 EXPORT_SYMBOL(mmc_erase); 2062 2063 int mmc_can_erase(struct mmc_card *card) 2064 { 2065 if ((card->host->caps & MMC_CAP_ERASE) && 2066 (card->csd.cmdclass & CCC_ERASE) && card->erase_size) 2067 return 1; 2068 return 0; 2069 } 2070 EXPORT_SYMBOL(mmc_can_erase); 2071 2072 int mmc_can_trim(struct mmc_card *card) 2073 { 2074 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) 2075 return 1; 2076 return 0; 2077 } 2078 EXPORT_SYMBOL(mmc_can_trim); 2079 2080 int mmc_can_discard(struct mmc_card *card) 2081 { 2082 /* 2083 * As there's no way to detect the discard support bit at v4.5 2084 * use the s/w feature support filed. 2085 */ 2086 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE) 2087 return 1; 2088 return 0; 2089 } 2090 EXPORT_SYMBOL(mmc_can_discard); 2091 2092 int mmc_can_sanitize(struct mmc_card *card) 2093 { 2094 if (!mmc_can_trim(card) && !mmc_can_erase(card)) 2095 return 0; 2096 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE) 2097 return 1; 2098 return 0; 2099 } 2100 EXPORT_SYMBOL(mmc_can_sanitize); 2101 2102 int mmc_can_secure_erase_trim(struct mmc_card *card) 2103 { 2104 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) 2105 return 1; 2106 return 0; 2107 } 2108 EXPORT_SYMBOL(mmc_can_secure_erase_trim); 2109 2110 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from, 2111 unsigned int nr) 2112 { 2113 if (!card->erase_size) 2114 return 0; 2115 if (from % card->erase_size || nr % card->erase_size) 2116 return 0; 2117 return 1; 2118 } 2119 EXPORT_SYMBOL(mmc_erase_group_aligned); 2120 2121 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card, 2122 unsigned int arg) 2123 { 2124 struct mmc_host *host = card->host; 2125 unsigned int max_discard, x, y, qty = 0, max_qty, timeout; 2126 unsigned int last_timeout = 0; 2127 2128 if (card->erase_shift) 2129 max_qty = UINT_MAX >> card->erase_shift; 2130 else if (mmc_card_sd(card)) 2131 max_qty = UINT_MAX; 2132 else 2133 max_qty = UINT_MAX / card->erase_size; 2134 2135 /* Find the largest qty with an OK timeout */ 2136 do { 2137 y = 0; 2138 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) { 2139 timeout = mmc_erase_timeout(card, arg, qty + x); 2140 if (timeout > host->max_discard_to) 2141 break; 2142 if (timeout < last_timeout) 2143 break; 2144 last_timeout = timeout; 2145 y = x; 2146 } 2147 qty += y; 2148 } while (y); 2149 2150 if (!qty) 2151 return 0; 2152 2153 if (qty == 1) 2154 return 1; 2155 2156 /* Convert qty to sectors */ 2157 if (card->erase_shift) 2158 max_discard = --qty << card->erase_shift; 2159 else if (mmc_card_sd(card)) 2160 max_discard = qty; 2161 else 2162 max_discard = --qty * card->erase_size; 2163 2164 return max_discard; 2165 } 2166 2167 unsigned int mmc_calc_max_discard(struct mmc_card *card) 2168 { 2169 struct mmc_host *host = card->host; 2170 unsigned int max_discard, max_trim; 2171 2172 if (!host->max_discard_to) 2173 return UINT_MAX; 2174 2175 /* 2176 * Without erase_group_def set, MMC erase timeout depends on clock 2177 * frequence which can change. In that case, the best choice is 2178 * just the preferred erase size. 2179 */ 2180 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1)) 2181 return card->pref_erase; 2182 2183 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG); 2184 if (mmc_can_trim(card)) { 2185 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG); 2186 if (max_trim < max_discard) 2187 max_discard = max_trim; 2188 } else if (max_discard < card->erase_size) { 2189 max_discard = 0; 2190 } 2191 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n", 2192 mmc_hostname(host), max_discard, host->max_discard_to); 2193 return max_discard; 2194 } 2195 EXPORT_SYMBOL(mmc_calc_max_discard); 2196 2197 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) 2198 { 2199 struct mmc_command cmd = {0}; 2200 2201 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card)) 2202 return 0; 2203 2204 cmd.opcode = MMC_SET_BLOCKLEN; 2205 cmd.arg = blocklen; 2206 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 2207 return mmc_wait_for_cmd(card->host, &cmd, 5); 2208 } 2209 EXPORT_SYMBOL(mmc_set_blocklen); 2210 2211 int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount, 2212 bool is_rel_write) 2213 { 2214 struct mmc_command cmd = {0}; 2215 2216 cmd.opcode = MMC_SET_BLOCK_COUNT; 2217 cmd.arg = blockcount & 0x0000FFFF; 2218 if (is_rel_write) 2219 cmd.arg |= 1 << 31; 2220 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 2221 return mmc_wait_for_cmd(card->host, &cmd, 5); 2222 } 2223 EXPORT_SYMBOL(mmc_set_blockcount); 2224 2225 static void mmc_hw_reset_for_init(struct mmc_host *host) 2226 { 2227 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) 2228 return; 2229 mmc_host_clk_hold(host); 2230 host->ops->hw_reset(host); 2231 mmc_host_clk_release(host); 2232 } 2233 2234 int mmc_can_reset(struct mmc_card *card) 2235 { 2236 u8 rst_n_function; 2237 2238 if (!mmc_card_mmc(card)) 2239 return 0; 2240 rst_n_function = card->ext_csd.rst_n_function; 2241 if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED) 2242 return 0; 2243 return 1; 2244 } 2245 EXPORT_SYMBOL(mmc_can_reset); 2246 2247 static int mmc_do_hw_reset(struct mmc_host *host, int check) 2248 { 2249 struct mmc_card *card = host->card; 2250 2251 if (!host->bus_ops->power_restore) 2252 return -EOPNOTSUPP; 2253 2254 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) 2255 return -EOPNOTSUPP; 2256 2257 if (!card) 2258 return -EINVAL; 2259 2260 if (!mmc_can_reset(card)) 2261 return -EOPNOTSUPP; 2262 2263 mmc_host_clk_hold(host); 2264 mmc_set_clock(host, host->f_init); 2265 2266 host->ops->hw_reset(host); 2267 2268 /* If the reset has happened, then a status command will fail */ 2269 if (check) { 2270 struct mmc_command cmd = {0}; 2271 int err; 2272 2273 cmd.opcode = MMC_SEND_STATUS; 2274 if (!mmc_host_is_spi(card->host)) 2275 cmd.arg = card->rca << 16; 2276 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 2277 err = mmc_wait_for_cmd(card->host, &cmd, 0); 2278 if (!err) { 2279 mmc_host_clk_release(host); 2280 return -ENOSYS; 2281 } 2282 } 2283 2284 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR); 2285 if (mmc_host_is_spi(host)) { 2286 host->ios.chip_select = MMC_CS_HIGH; 2287 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; 2288 } else { 2289 host->ios.chip_select = MMC_CS_DONTCARE; 2290 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 2291 } 2292 host->ios.bus_width = MMC_BUS_WIDTH_1; 2293 host->ios.timing = MMC_TIMING_LEGACY; 2294 mmc_set_ios(host); 2295 2296 mmc_host_clk_release(host); 2297 2298 return host->bus_ops->power_restore(host); 2299 } 2300 2301 int mmc_hw_reset(struct mmc_host *host) 2302 { 2303 return mmc_do_hw_reset(host, 0); 2304 } 2305 EXPORT_SYMBOL(mmc_hw_reset); 2306 2307 int mmc_hw_reset_check(struct mmc_host *host) 2308 { 2309 return mmc_do_hw_reset(host, 1); 2310 } 2311 EXPORT_SYMBOL(mmc_hw_reset_check); 2312 2313 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) 2314 { 2315 host->f_init = freq; 2316 2317 #ifdef CONFIG_MMC_DEBUG 2318 pr_info("%s: %s: trying to init card at %u Hz\n", 2319 mmc_hostname(host), __func__, host->f_init); 2320 #endif 2321 mmc_power_up(host, host->ocr_avail); 2322 2323 /* 2324 * Some eMMCs (with VCCQ always on) may not be reset after power up, so 2325 * do a hardware reset if possible. 2326 */ 2327 mmc_hw_reset_for_init(host); 2328 2329 /* 2330 * sdio_reset sends CMD52 to reset card. Since we do not know 2331 * if the card is being re-initialized, just send it. CMD52 2332 * should be ignored by SD/eMMC cards. 2333 */ 2334 sdio_reset(host); 2335 mmc_go_idle(host); 2336 2337 mmc_send_if_cond(host, host->ocr_avail); 2338 2339 /* Order's important: probe SDIO, then SD, then MMC */ 2340 if (!mmc_attach_sdio(host)) 2341 return 0; 2342 if (!mmc_attach_sd(host)) 2343 return 0; 2344 if (!mmc_attach_mmc(host)) 2345 return 0; 2346 2347 mmc_power_off(host); 2348 return -EIO; 2349 } 2350 2351 int _mmc_detect_card_removed(struct mmc_host *host) 2352 { 2353 int ret; 2354 2355 if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive) 2356 return 0; 2357 2358 if (!host->card || mmc_card_removed(host->card)) 2359 return 1; 2360 2361 ret = host->bus_ops->alive(host); 2362 2363 /* 2364 * Card detect status and alive check may be out of sync if card is 2365 * removed slowly, when card detect switch changes while card/slot 2366 * pads are still contacted in hardware (refer to "SD Card Mechanical 2367 * Addendum, Appendix C: Card Detection Switch"). So reschedule a 2368 * detect work 200ms later for this case. 2369 */ 2370 if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) { 2371 mmc_detect_change(host, msecs_to_jiffies(200)); 2372 pr_debug("%s: card removed too slowly\n", mmc_hostname(host)); 2373 } 2374 2375 if (ret) { 2376 mmc_card_set_removed(host->card); 2377 pr_debug("%s: card remove detected\n", mmc_hostname(host)); 2378 } 2379 2380 return ret; 2381 } 2382 2383 int mmc_detect_card_removed(struct mmc_host *host) 2384 { 2385 struct mmc_card *card = host->card; 2386 int ret; 2387 2388 WARN_ON(!host->claimed); 2389 2390 if (!card) 2391 return 1; 2392 2393 ret = mmc_card_removed(card); 2394 /* 2395 * The card will be considered unchanged unless we have been asked to 2396 * detect a change or host requires polling to provide card detection. 2397 */ 2398 if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL)) 2399 return ret; 2400 2401 host->detect_change = 0; 2402 if (!ret) { 2403 ret = _mmc_detect_card_removed(host); 2404 if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) { 2405 /* 2406 * Schedule a detect work as soon as possible to let a 2407 * rescan handle the card removal. 2408 */ 2409 cancel_delayed_work(&host->detect); 2410 _mmc_detect_change(host, 0, false); 2411 } 2412 } 2413 2414 return ret; 2415 } 2416 EXPORT_SYMBOL(mmc_detect_card_removed); 2417 2418 void mmc_rescan(struct work_struct *work) 2419 { 2420 struct mmc_host *host = 2421 container_of(work, struct mmc_host, detect.work); 2422 int i; 2423 2424 if (host->rescan_disable) 2425 return; 2426 2427 /* If there is a non-removable card registered, only scan once */ 2428 if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered) 2429 return; 2430 host->rescan_entered = 1; 2431 2432 mmc_bus_get(host); 2433 2434 /* 2435 * if there is a _removable_ card registered, check whether it is 2436 * still present 2437 */ 2438 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead 2439 && !(host->caps & MMC_CAP_NONREMOVABLE)) 2440 host->bus_ops->detect(host); 2441 2442 host->detect_change = 0; 2443 2444 /* 2445 * Let mmc_bus_put() free the bus/bus_ops if we've found that 2446 * the card is no longer present. 2447 */ 2448 mmc_bus_put(host); 2449 mmc_bus_get(host); 2450 2451 /* if there still is a card present, stop here */ 2452 if (host->bus_ops != NULL) { 2453 mmc_bus_put(host); 2454 goto out; 2455 } 2456 2457 /* 2458 * Only we can add a new handler, so it's safe to 2459 * release the lock here. 2460 */ 2461 mmc_bus_put(host); 2462 2463 if (host->ops->get_cd && host->ops->get_cd(host) == 0) { 2464 mmc_claim_host(host); 2465 mmc_power_off(host); 2466 mmc_release_host(host); 2467 goto out; 2468 } 2469 2470 mmc_claim_host(host); 2471 for (i = 0; i < ARRAY_SIZE(freqs); i++) { 2472 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) 2473 break; 2474 if (freqs[i] <= host->f_min) 2475 break; 2476 } 2477 mmc_release_host(host); 2478 2479 out: 2480 if (host->caps & MMC_CAP_NEEDS_POLL) 2481 mmc_schedule_delayed_work(&host->detect, HZ); 2482 } 2483 2484 void mmc_start_host(struct mmc_host *host) 2485 { 2486 host->f_init = max(freqs[0], host->f_min); 2487 host->rescan_disable = 0; 2488 if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP) 2489 mmc_power_off(host); 2490 else 2491 mmc_power_up(host, host->ocr_avail); 2492 _mmc_detect_change(host, 0, false); 2493 } 2494 2495 void mmc_stop_host(struct mmc_host *host) 2496 { 2497 #ifdef CONFIG_MMC_DEBUG 2498 unsigned long flags; 2499 spin_lock_irqsave(&host->lock, flags); 2500 host->removed = 1; 2501 spin_unlock_irqrestore(&host->lock, flags); 2502 #endif 2503 2504 host->rescan_disable = 1; 2505 cancel_delayed_work_sync(&host->detect); 2506 mmc_flush_scheduled_work(); 2507 2508 /* clear pm flags now and let card drivers set them as needed */ 2509 host->pm_flags = 0; 2510 2511 mmc_bus_get(host); 2512 if (host->bus_ops && !host->bus_dead) { 2513 /* Calling bus_ops->remove() with a claimed host can deadlock */ 2514 host->bus_ops->remove(host); 2515 mmc_claim_host(host); 2516 mmc_detach_bus(host); 2517 mmc_power_off(host); 2518 mmc_release_host(host); 2519 mmc_bus_put(host); 2520 return; 2521 } 2522 mmc_bus_put(host); 2523 2524 BUG_ON(host->card); 2525 2526 mmc_power_off(host); 2527 } 2528 2529 int mmc_power_save_host(struct mmc_host *host) 2530 { 2531 int ret = 0; 2532 2533 #ifdef CONFIG_MMC_DEBUG 2534 pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__); 2535 #endif 2536 2537 mmc_bus_get(host); 2538 2539 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 2540 mmc_bus_put(host); 2541 return -EINVAL; 2542 } 2543 2544 if (host->bus_ops->power_save) 2545 ret = host->bus_ops->power_save(host); 2546 2547 mmc_bus_put(host); 2548 2549 mmc_power_off(host); 2550 2551 return ret; 2552 } 2553 EXPORT_SYMBOL(mmc_power_save_host); 2554 2555 int mmc_power_restore_host(struct mmc_host *host) 2556 { 2557 int ret; 2558 2559 #ifdef CONFIG_MMC_DEBUG 2560 pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__); 2561 #endif 2562 2563 mmc_bus_get(host); 2564 2565 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { 2566 mmc_bus_put(host); 2567 return -EINVAL; 2568 } 2569 2570 mmc_power_up(host, host->card->ocr); 2571 ret = host->bus_ops->power_restore(host); 2572 2573 mmc_bus_put(host); 2574 2575 return ret; 2576 } 2577 EXPORT_SYMBOL(mmc_power_restore_host); 2578 2579 /* 2580 * Flush the cache to the non-volatile storage. 2581 */ 2582 int mmc_flush_cache(struct mmc_card *card) 2583 { 2584 struct mmc_host *host = card->host; 2585 int err = 0; 2586 2587 if (!(host->caps2 & MMC_CAP2_CACHE_CTRL)) 2588 return err; 2589 2590 if (mmc_card_mmc(card) && 2591 (card->ext_csd.cache_size > 0) && 2592 (card->ext_csd.cache_ctrl & 1)) { 2593 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 2594 EXT_CSD_FLUSH_CACHE, 1, 0); 2595 if (err) 2596 pr_err("%s: cache flush error %d\n", 2597 mmc_hostname(card->host), err); 2598 } 2599 2600 return err; 2601 } 2602 EXPORT_SYMBOL(mmc_flush_cache); 2603 2604 /* 2605 * Turn the cache ON/OFF. 2606 * Turning the cache OFF shall trigger flushing of the data 2607 * to the non-volatile storage. 2608 * This function should be called with host claimed 2609 */ 2610 int mmc_cache_ctrl(struct mmc_host *host, u8 enable) 2611 { 2612 struct mmc_card *card = host->card; 2613 unsigned int timeout; 2614 int err = 0; 2615 2616 if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) || 2617 mmc_card_is_removable(host)) 2618 return err; 2619 2620 if (card && mmc_card_mmc(card) && 2621 (card->ext_csd.cache_size > 0)) { 2622 enable = !!enable; 2623 2624 if (card->ext_csd.cache_ctrl ^ enable) { 2625 timeout = enable ? card->ext_csd.generic_cmd6_time : 0; 2626 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 2627 EXT_CSD_CACHE_CTRL, enable, timeout); 2628 if (err) 2629 pr_err("%s: cache %s error %d\n", 2630 mmc_hostname(card->host), 2631 enable ? "on" : "off", 2632 err); 2633 else 2634 card->ext_csd.cache_ctrl = enable; 2635 } 2636 } 2637 2638 return err; 2639 } 2640 EXPORT_SYMBOL(mmc_cache_ctrl); 2641 2642 #ifdef CONFIG_PM 2643 2644 /* Do the card removal on suspend if card is assumed removeable 2645 * Do that in pm notifier while userspace isn't yet frozen, so we will be able 2646 to sync the card. 2647 */ 2648 int mmc_pm_notify(struct notifier_block *notify_block, 2649 unsigned long mode, void *unused) 2650 { 2651 struct mmc_host *host = container_of( 2652 notify_block, struct mmc_host, pm_notify); 2653 unsigned long flags; 2654 int err = 0; 2655 2656 switch (mode) { 2657 case PM_HIBERNATION_PREPARE: 2658 case PM_SUSPEND_PREPARE: 2659 spin_lock_irqsave(&host->lock, flags); 2660 host->rescan_disable = 1; 2661 spin_unlock_irqrestore(&host->lock, flags); 2662 cancel_delayed_work_sync(&host->detect); 2663 2664 if (!host->bus_ops) 2665 break; 2666 2667 /* Validate prerequisites for suspend */ 2668 if (host->bus_ops->pre_suspend) 2669 err = host->bus_ops->pre_suspend(host); 2670 if (!err && host->bus_ops->suspend) 2671 break; 2672 2673 /* Calling bus_ops->remove() with a claimed host can deadlock */ 2674 host->bus_ops->remove(host); 2675 mmc_claim_host(host); 2676 mmc_detach_bus(host); 2677 mmc_power_off(host); 2678 mmc_release_host(host); 2679 host->pm_flags = 0; 2680 break; 2681 2682 case PM_POST_SUSPEND: 2683 case PM_POST_HIBERNATION: 2684 case PM_POST_RESTORE: 2685 2686 spin_lock_irqsave(&host->lock, flags); 2687 host->rescan_disable = 0; 2688 spin_unlock_irqrestore(&host->lock, flags); 2689 _mmc_detect_change(host, 0, false); 2690 2691 } 2692 2693 return 0; 2694 } 2695 #endif 2696 2697 /** 2698 * mmc_init_context_info() - init synchronization context 2699 * @host: mmc host 2700 * 2701 * Init struct context_info needed to implement asynchronous 2702 * request mechanism, used by mmc core, host driver and mmc requests 2703 * supplier. 2704 */ 2705 void mmc_init_context_info(struct mmc_host *host) 2706 { 2707 spin_lock_init(&host->context_info.lock); 2708 host->context_info.is_new_req = false; 2709 host->context_info.is_done_rcv = false; 2710 host->context_info.is_waiting_last_req = false; 2711 init_waitqueue_head(&host->context_info.wait); 2712 } 2713 2714 static int __init mmc_init(void) 2715 { 2716 int ret; 2717 2718 workqueue = alloc_ordered_workqueue("kmmcd", 0); 2719 if (!workqueue) 2720 return -ENOMEM; 2721 2722 ret = mmc_register_bus(); 2723 if (ret) 2724 goto destroy_workqueue; 2725 2726 ret = mmc_register_host_class(); 2727 if (ret) 2728 goto unregister_bus; 2729 2730 ret = sdio_register_bus(); 2731 if (ret) 2732 goto unregister_host_class; 2733 2734 return 0; 2735 2736 unregister_host_class: 2737 mmc_unregister_host_class(); 2738 unregister_bus: 2739 mmc_unregister_bus(); 2740 destroy_workqueue: 2741 destroy_workqueue(workqueue); 2742 2743 return ret; 2744 } 2745 2746 static void __exit mmc_exit(void) 2747 { 2748 sdio_unregister_bus(); 2749 mmc_unregister_host_class(); 2750 mmc_unregister_bus(); 2751 destroy_workqueue(workqueue); 2752 } 2753 2754 subsys_initcall(mmc_init); 2755 module_exit(mmc_exit); 2756 2757 MODULE_LICENSE("GPL"); 2758