/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */

static struct workqueue_struct *workqueue;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
bool mmc_assume_removable;
#else
bool mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}

#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_release(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);
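
/*
 * Example (illustrative sketch, not part of this file): a typical host
 * controller driver completes a request from its interrupt handler.
 * "my_host", "h->mrq" and my_host_xfer_error() are hypothetical names;
 * only mmc_request_done() is the real API:
 *
 *	static irqreturn_t my_host_irq(int irq, void *dev_id)
 *	{
 *		struct my_host *h = dev_id;
 *		struct mmc_request *mrq = h->mrq;
 *
 *		mrq->cmd->error = my_host_xfer_error(h);
 *		h->mrq = NULL;
 *		mmc_request_done(h->mmc, mrq);
 *		return IRQ_HANDLED;
 *	}
 */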

static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}

/**
 * mmc_start_bkops - start BKOPS for supported cards
 * @card: MMC card to start BKOPS
 * @from_exception: a flag to indicate if this function was
 *		    called due to an exception raised by the card
 *
 * Start background operations whenever requested.
 * When the urgent BKOPS bit is set in a R1 command response
 * then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	BUG_ON(!card);

	if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_BKOPS_MAX_TIMEOUT;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		goto out;
	}

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher) BKOPS is executed
	 * synchronously; otherwise the operation is left in progress.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
out:
	mmc_release_host(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);
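
/*
 * Example (illustrative sketch, not part of this file): a caller that has
 * just read the BKOPS status, e.g. after an R1 exception event, might kick
 * off background operations like this. EXT_CSD_URGENT_BKOPS is assumed to
 * be the exception-status bit defined in <linux/mmc/mmc.h>:
 *
 *	if (card->ext_csd.raw_exception_status & EXT_CSD_URGENT_BKOPS)
 *		mmc_start_bkops(card, true);
 */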

/*
 * mmc_wait_data_done() - done callback for data request
 * @mrq: done data request
 *
 * Wakes up the mmc context; passed as the done callback to the host
 * controller driver.
 */
static void mmc_wait_data_done(struct mmc_request *mrq)
{
	mrq->host->context_info.is_done_rcv = true;
	wake_up_interruptible(&mrq->host->context_info.wait);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

/*
 * __mmc_start_data_req() - starts data request
 * @host: MMC host to start the request
 * @mrq: data request to start
 *
 * Sets the done callback to be called when the request is completed by the
 * card. Starts the data MMC request execution.
 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
	mrq->done = mmc_wait_data_done;
	mrq->host = host;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_wait_data_done(mrq);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);

	return 0;
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		complete(&mrq->completion);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);
	return 0;
}

/*
 * mmc_wait_for_data_req_done() - wait for request completed
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 *
 * Blocks the MMC context until the host controller acknowledges the end of
 * data request execution, or until a new request notification arrives from
 * the block layer. Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
static int mmc_wait_for_data_req_done(struct mmc_host *host,
				      struct mmc_request *mrq,
				      struct mmc_async_req *next_req)
{
	struct mmc_command *cmd;
	struct mmc_context_info *context_info = &host->context_info;
	int err;
	unsigned long flags;

	while (1) {
		wait_event_interruptible(context_info->wait,
				(context_info->is_done_rcv ||
				 context_info->is_new_req));
		spin_lock_irqsave(&context_info->lock, flags);
		context_info->is_waiting_last_req = false;
		spin_unlock_irqrestore(&context_info->lock, flags);
		if (context_info->is_done_rcv) {
			context_info->is_done_rcv = false;
			context_info->is_new_req = false;
			cmd = mrq->cmd;

			if (!cmd->error || !cmd->retries ||
			    mmc_card_removed(host->card)) {
				err = host->areq->err_check(host->card,
							    host->areq);
				break; /* return err */
			} else {
				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
					mmc_hostname(host),
					cmd->opcode, cmd->error);
				cmd->retries--;
				cmd->error = 0;
				host->ops->request(host, mrq);
				continue; /* wait for done/new event again */
			}
		} else if (context_info->is_new_req) {
			context_info->is_new_req = false;
			if (!next_req) {
				err = MMC_BLK_NEW_REQUEST;
				break; /* return err */
			}
		}
	}
	return err;
}

static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;

		/*
		 * If the host has timed out waiting for the sanitize
		 * operation to complete, the card might still be in the
		 * programming state, so try to bring the card out of
		 * that state.
		 */
		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
			if (!mmc_interrupt_hpi(host->card)) {
				pr_warning("%s: %s: Interrupted sanitize\n",
					   mmc_hostname(host), __func__);
				cmd->error = 0;
				break;
			} else {
				pr_err("%s: %s: Failed to interrupt sanitize\n",
				       mmc_hostname(host), __func__);
			}
		}
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	}
}

/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previously started request
 *		  that may run in parallel to this call, otherwise false
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let
 * host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req) {
		mmc_host_clk_hold(host);
		host->ops->pre_req(host, mrq, is_first_req);
		mmc_host_clk_release(host);
	}
}

/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: error; if non-zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req) {
		mmc_host_clk_hold(host);
		host->ops->post_req(host, mrq, err);
		mmc_host_clk_release(host);
	}
}

/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @error: out parameter; returns 0 for success, otherwise non-zero
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for completion
 * of that request before starting the new one, then return.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, or NULL if none completed.
 * Waits for an ongoing request (previously started) to complete and
 * returns the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
		if (err == MMC_BLK_NEW_REQUEST) {
			if (error)
				*error = err;
			/*
			 * The previous request was not completed,
			 * nothing to return
			 */
			return NULL;
		}
		/*
		 * Check BKOPS urgency for each R1 response
		 */
		if (host->card && mmc_card_mmc(host->card) &&
		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT))
			mmc_start_bkops(host->card, true);
	}

	if (!err && areq)
		start_err = __mmc_start_data_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
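
/*
 * Example (illustrative sketch, not part of this file): issuing a single
 * 512-byte read synchronously with mmc_wait_for_req(). The buffer, sector
 * and error handling are hypothetical; the opcode and flag macros are the
 * real ones from <linux/mmc/mmc.h> and <linux/mmc/core.h>:
 *
 *	struct mmc_request mrq = {NULL};
 *	struct mmc_command cmd = {0};
 *	struct mmc_data data = {0};
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, 512);
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = sector;
 *	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mmc_set_data_timeout(&data, card);
 *	mmc_wait_for_req(card->host, &mrq);
 *	err = cmd.error ? cmd.error : data.error;
 */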

/**
 * mmc_interrupt_hpi - issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issues a High Priority Interrupt, then polls the card status
 * until the card is out of the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	BUG_ON(!card);

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
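
/*
 * Example (illustrative sketch, not part of this file): querying the card
 * status with mmc_wait_for_cmd(), allowing up to 3 retries, mirroring how
 * mmc_do_erase() below polls the card. The host must already be claimed:
 *
 *	struct mmc_command cmd = {0};
 *	int err;
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *	if (!err)
 *		pr_debug("card state %u\n", R1_CURRENT_STATE(cmd.resp[0]));
 */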

/**
 * mmc_stop_bkops - stop ongoing BKOPS
 * @card: MMC card to check BKOPS
 *
 * Send HPI command to stop ongoing background operations to
 * allow rapid servicing of foreground operations, e.g. reads/writes.
 * Wait until the card comes out of the programming state
 * to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	BUG_ON(!card);
	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL, we can't issue an HPI;
	 * the card should complete the BKOPS on its own.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);

int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	/*
	 * In future work, we should consider storing the entire ext_csd.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		pr_err("%s: could not allocate buffer to receive the ext_csd.\n",
		       mmc_hostname(card->host));
		return -ENOMEM;
	}

	mmc_claim_host(card->host);
	err = mmc_send_ext_csd(card, ext_csd);
	mmc_release_host(card->host);
	if (err)
		goto out;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
out:
	kfree(ext_csd);
	return err;
}
EXPORT_SYMBOL(mmc_read_bkops_status);

/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says: "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 300ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 300000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);

/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter-gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
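
/*
 * Example (illustrative sketch, not part of this file): rounding a transfer
 * length before building a single scatter-gather entry; with the current
 * 32-bit alignment a length of 30 becomes 32. The variable names are
 * hypothetical:
 *
 *	len = mmc_align_data_size(card, len);
 */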

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value, then this will return prematurely
 * with that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (host->ops->enable && !stop && host->claim_cnt == 1)
		host->ops->enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_try_claim_host - try exclusively to claim a host
 * @host: mmc host to claim
 *
 * Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
		host->ops->enable(host);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release an MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	if (host->ops->disable && host->claim_cnt == 1)
		host->ops->disable(host);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_release_host);

/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card)
{
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
}
EXPORT_SYMBOL(mmc_get_card);

/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card)
{
	mmc_release_host(card->host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
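
/*
 * Example (illustrative sketch, not part of this file): the usual pattern
 * for issuing commands from outside the core is to bracket them with
 * mmc_get_card()/mmc_put_card(), which handle both runtime PM and the
 * host claim. mmc_set_blocklen() is the real helper defined later in
 * this file:
 *
 *	mmc_get_card(card);
 *	err = mmc_set_blocklen(card, 512);
 *	mmc_put_card(card);
 */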

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		BUG_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		__mmc_set_clock(host, host->clk_old);
	}
}

void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible, a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
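
/*
 * Example (illustrative sketch, not part of this file): a platform driver
 * advertising a 3.3 V +/- 0.1 V supply would build its ocr_avail like this;
 * per the boundary-case note above, both boundary voltages set their bits:
 *
 *	host->ocr_avail = mmc_vddrange_to_ocrmask(3200, 3400);
 *
 * which yields MMC_VDD_31_32 | MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35.
 */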

#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/*
		 * REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/*
		 * If we're using a fixed/static regulator, don't call
		 * regulator_set_voltage; it would fail.
		 */
		voltage = regulator_get_voltage(supply);

		if (!regulator_can_change_voltage(supply))
			min_uV = max_uV = voltage;

		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);

int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	struct regulator *supply;
	int ret;

	supply = devm_regulator_get(dev, "vmmc");
	mmc->supply.vmmc = supply;
	mmc->supply.vqmmc = devm_regulator_get(dev, "vqmmc");

	if (IS_ERR(supply))
		return PTR_ERR(supply);

	ret = mmc_regulator_get_ocrmask(supply);
	if (ret > 0)
		mmc->ocr_avail = ret;
	else
		dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);

#endif /* CONFIG_REGULATOR */
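
/*
 * Example (illustrative sketch, not part of this file): a host driver's
 * probe() typically wires the regulators up like this, where "mmc" comes
 * from an mmc_alloc_host() call earlier in the (hypothetical) probe path.
 * On success, mmc->ocr_avail reflects the vmmc regulator's voltage range:
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret)
 *		return ret;
 */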

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		mmc_host_clk_hold(host);
		host->ios.vdd = bit;
		mmc_set_ios(host);
		mmc_host_clk_release(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}

int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;

	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}

	if (err)
		host->ios.signal_voltage = old_signal_voltage;

	return err;
}

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	struct mmc_command cmd = {0};
	int err = 0;
	u32 clock;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return __mmc_set_signal_voltage(host, signal_voltage);

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warning("%s: cannot verify signal voltage switch\n",
				mmc_hostname(host));

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;

	mmc_host_clk_hold(host);
	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}
	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (__mmc_set_signal_voltage(host, signal_voltage)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Keep clock gated for at least 5 ms */
	mmc_delay(5);
	host->ios.clock = clock;
	mmc_set_ios(host);

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			"power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host);
	}

	mmc_host_clk_release(host);

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host)
{
	int bit;

	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_host_clk_hold(host);

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/* Set signal voltage to 3.3V */
	__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}

void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_host_clk_hold(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}

void mmc_power_cycle(struct mmc_host *host)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}

/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
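
/*
 * Example (illustrative sketch, not part of this file): a host driver's
 * card-detect interrupt handler usually just kicks the rescan machinery,
 * adding a small delay so the slot can debounce. The handler name and the
 * dev_id wiring are hypothetical:
 *
 *	static irqreturn_t my_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */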

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card. That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time. For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value. For modern cards it
	 * will end up being 4MiB. Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}

static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target. The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	unsigned long timeout;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2. For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case. SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands. Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}

		/* Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}

	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
	return err;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
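
/*
 * Example (illustrative sketch, not part of this file): discarding a range
 * of sectors, the way a block driver might before reusing them. "from" and
 * "nr" are hypothetical sector values; the capability checks mirror the
 * helpers defined below:
 *
 *	mmc_claim_host(card->host);
 *	if (mmc_can_erase(card) && mmc_erase_group_aligned(card, from, nr))
 *		err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 *	mmc_release_host(card->host);
 */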

int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5,
	 * use the s/w feature support field instead.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}
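
/*
 * Illustration of the search in mmc_do_calc_max_discard() above
 * (hypothetical numbers): if a qty of 40 erase groups fits within
 * host->max_discard_to but 48 does not, the first pass of doubling
 * accepts x = 1, 2, ..., 32 (qty = 32), the second pass accepts up to
 * x = 8 (qty = 40), and the third pass accepts nothing, ending the
 * loop.  The conversion then drops one group (--qty) for non-SD cards,
 * presumably because an unaligned discard can straddle one extra erase
 * group.
 */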

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_discard_to)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency, which can change.  In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_discard_to);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
		       bool is_rel_write)
{
	struct mmc_command cmd = {0};

	cmd.opcode = MMC_SET_BLOCK_COUNT;
	cmd.arg = blockcount & 0x0000FFFF;
	if (is_rel_write)
		cmd.arg |= 1 << 31;	/* bit 31 marks a reliable write */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);

static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	mmc_host_clk_hold(host);
	host->ops->hw_reset(host);
	mmc_host_clk_release(host);
}

int mmc_can_reset(struct mmc_card *card)
{
	u8 rst_n_function;

	if (!mmc_card_mmc(card))
		return 0;
	rst_n_function = card->ext_csd.rst_n_function;
	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_can_reset);

static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
	struct mmc_card *card = host->card;

	if (!host->bus_ops->power_restore)
		return -EOPNOTSUPP;

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return -EOPNOTSUPP;

	if (!card)
		return -EINVAL;

	if (!mmc_can_reset(card))
		return -EOPNOTSUPP;

	mmc_host_clk_hold(host);
	mmc_set_clock(host, host->f_init);

	host->ops->hw_reset(host);

	/* If the reset has happened, then a status command will fail */
	if (check) {
		struct mmc_command cmd = {0};
		int err;

		cmd.opcode = MMC_SEND_STATUS;
		if (!mmc_host_is_spi(card->host))
			cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (!err) {
			mmc_host_clk_release(host);
			return -ENOSYS;
		}
	}

	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	mmc_host_clk_release(host);

	return host->bus_ops->power_restore(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);

int mmc_hw_reset_check(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);
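
/*
 * Example use of the reset API above (illustrative only): a caller that
 * needs to know whether the RST_n signal actually reset the card can use
 * the checking variant:
 *
 *	err = mmc_hw_reset_check(host);
 *	if (err == -ENOSYS)
 *		;	// reset line is wired up but had no effect
 *
 * mmc_hw_reset() performs the same sequence without the verification
 * step.
 */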

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset sends CMD52 to reset the card.  Since we do not know
	 * if the card is being re-initialized, just send it.  CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}
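
/*
 * Illustration of the retry loop in mmc_rescan() below (hypothetical
 * host): with host->f_min = 250 kHz, the loop over freqs[] tries
 * 400 kHz, 300 kHz and then max(200 kHz, f_min) = 250 kHz, stopping
 * early once an attach succeeds or once freqs[i] drops to or below
 * f_min.
 */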

int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
		return 0;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);

	/*
	 * Card detect status and alive check may be out of sync if card is
	 * removed slowly, when card detect switch changes while card/slot
	 * pads are still contacted in hardware (refer to "SD Card Mechanical
	 * Addendum, Appendix C: Card Detection Switch").  So reschedule a
	 * detect work 200ms later for this case.
	 */
	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
		mmc_detect_change(host, msecs_to_jiffies(200));
		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
	}

	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			mmc_detect_change(host, 0);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);

void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	/* If there is a non-removable card registered, only scan once */
	if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
		mmc_claim_host(host);
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	mmc_claim_host(host);
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
		mmc_power_off(host);
	else
		mmc_power_up(host);
	mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}
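
/*
 * Sketch of a typical host driver life cycle around the start/stop
 * helpers above (illustrative only; my_priv and pdev are hypothetical):
 *
 *	host = mmc_alloc_host(sizeof(struct my_priv), &pdev->dev);
 *	// fill in host->ops, host->f_min, host->caps, ...
 *	mmc_add_host(host);		// ends up calling mmc_start_host()
 *	...
 *	mmc_remove_host(host);		// ends up calling mmc_stop_host()
 *	mmc_free_host(host);
 */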

int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
		return err;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 * This function should be called with the host claimed.
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	unsigned int timeout;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
	    mmc_card_is_removable(host))
		return err;

	if (card && mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0)) {
		enable = !!enable;

		if (card->ext_csd.cache_ctrl ^ enable) {
			timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_CACHE_CTRL, enable, timeout);
			if (err)
				pr_err("%s: cache %s error %d\n",
				       mmc_hostname(card->host),
				       enable ? "on" : "off",
				       err);
			else
				card->ext_csd.cache_ctrl = enable;
		}
	}

	return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);
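
/*
 * Example use of the cache helpers above (illustrative only), from a
 * context that has claimed the host:
 *
 *	err = mmc_cache_ctrl(host, 1);	// turn the volatile cache on
 *	...
 *	err = mmc_flush_cache(card);	// write back any dirty data
 *
 * Both helpers return 0 without touching the card when the host lacks
 * MMC_CAP2_CACHE_CTRL.
 */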

#ifdef CONFIG_PM

/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	/* This function is deprecated */
	return 0;
}
EXPORT_SYMBOL(mmc_suspend_host);

/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	/* This function is deprecated */
	return 0;
}
EXPORT_SYMBOL(mmc_resume_host);

/*
 * Do the card removal on suspend if the card is assumed removable.  Do
 * this in the PM notifier while userspace isn't yet frozen, so we will
 * be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops)
			break;

		/* Validate prerequisites for suspend */
		if (host->bus_ops->pre_suspend)
			err = host->bus_ops->pre_suspend(host);
		if (!err && host->bus_ops->suspend)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);

	}

	return 0;
}
#endif

/**
 * mmc_init_context_info() - init synchronization context
 * @host: mmc host
 *
 * Init struct context_info needed to implement asynchronous
 * request mechanism, used by mmc core, host driver and mmc requests
 * supplier.
 */
void mmc_init_context_info(struct mmc_host *host)
{
	spin_lock_init(&host->context_info.lock);
	host->context_info.is_new_req = false;
	host->context_info.is_done_rcv = false;
	host->context_info.is_waiting_last_req = false;
	init_waitqueue_head(&host->context_info.wait);
}

static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");