/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */

static struct workqueue_struct *workqueue;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
bool mmc_assume_removable;
#else
bool mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
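/*
 * Usage note (illustrative, not part of the original sources): since this
 * file is normally built into the mmc_core module, the "removable"
 * parameter above can be set on the kernel command line, e.g.
 *
 *	mmc_core.removable=0
 *
 * to tell the core that cards will not be removed during suspend. The
 * exact module prefix is an assumption based on the usual build layout.
 */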
#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_release(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);
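/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical): a
 * host controller driver typically calls mmc_request_done() from its
 * interrupt handler once the hardware has finished a transfer:
 *
 *	static irqreturn_t foo_mmc_irq(int irq, void *dev_id)
 *	{
 *		struct foo_mmc_host *fh = dev_id;
 *		struct mmc_request *mrq = fh->mrq;
 *
 *		fh->mrq = NULL;
 *		mrq->cmd->error = foo_mmc_decode_error(fh);
 *		mmc_request_done(fh->mmc, mrq);
 *		return IRQ_HANDLED;
 *	}
 */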
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}

/**
 * mmc_start_bkops - start BKOPS for supported cards
 * @card: MMC card to start BKOPS
 * @from_exception: A flag to indicate if this function was
 *		    called due to an exception raised by the card
 *
 * Start background operations whenever requested.
 * When the urgent BKOPS bit is set in a R1 command response
 * then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	BUG_ON(!card);

	if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_BKOPS_MAX_TIMEOUT;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		goto out;
	}

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher) the busy signal is
	 * used and BKOPS executes synchronously; otherwise the operation
	 * continues in the background.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
out:
	mmc_release_host(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);

/*
 * mmc_wait_data_done() - done callback for data request
 * @mrq: done data request
 *
 * Wakes up mmc context, passed as a callback to host controller driver
 */
static void mmc_wait_data_done(struct mmc_request *mrq)
{
	mrq->host->context_info.is_done_rcv = true;
	wake_up_interruptible(&mrq->host->context_info.wait);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}
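/*
 * Usage sketch (illustrative): a caller that notices the exception bit
 * in an R1 response can kick off background operations. This mirrors
 * the check done in mmc_start_req() below:
 *
 *	if (cmd->resp[0] & R1_EXCEPTION_EVENT)
 *		mmc_start_bkops(card, true);
 *
 * With from_exception set, only urgent status (EXT_CSD_BKOPS_LEVEL_2 or
 * higher) actually starts BKOPS.
 */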
/*
 * __mmc_start_data_req() - starts data request
 * @host: MMC host to start the request
 * @mrq: data request to start
 *
 * Sets the done callback to be called when the request is completed by the
 * card. Starts the data MMC request execution.
 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
	mrq->done = mmc_wait_data_done;
	mrq->host = host;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_wait_data_done(mrq);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);

	return 0;
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		complete(&mrq->completion);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);
	return 0;
}

/*
 * mmc_wait_for_data_req_done() - wait for request completed
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 *
 * Blocks the MMC context until the host controller acknowledges the end of
 * data request execution, or until a new request notification arrives from
 * the block layer. Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
static int mmc_wait_for_data_req_done(struct mmc_host *host,
				      struct mmc_request *mrq,
				      struct mmc_async_req *next_req)
{
	struct mmc_command *cmd;
	struct mmc_context_info *context_info = &host->context_info;
	int err;
	unsigned long flags;

	while (1) {
		wait_event_interruptible(context_info->wait,
				(context_info->is_done_rcv ||
				 context_info->is_new_req));
		spin_lock_irqsave(&context_info->lock, flags);
		context_info->is_waiting_last_req = false;
		spin_unlock_irqrestore(&context_info->lock, flags);
		if (context_info->is_done_rcv) {
			context_info->is_done_rcv = false;
			context_info->is_new_req = false;
			cmd = mrq->cmd;
			if (!cmd->error || !cmd->retries ||
			    mmc_card_removed(host->card)) {
				err = host->areq->err_check(host->card,
							    host->areq);
				break; /* return err */
			} else {
				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
					mmc_hostname(host),
					cmd->opcode, cmd->error);
				cmd->retries--;
				cmd->error = 0;
				host->ops->request(host, mrq);
				continue; /* wait for done/new event again */
			}
		} else if (context_info->is_new_req) {
			context_info->is_new_req = false;
			if (!next_req) {
				err = MMC_BLK_NEW_REQUEST;
				break; /* return err */
			}
		}
	}
	return err;
}

static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	}
}
/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previously started request
 *		  that may run in parallel to this call, otherwise false
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let the
 * host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req) {
		mmc_host_clk_hold(host);
		host->ops->pre_req(host, mrq, is_first_req);
		mmc_host_clk_release(host);
	}
}

/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: Error, if non zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req) {
		mmc_host_clk_hold(host);
		host->ops->post_req(host, mrq, err);
		mmc_host_clk_release(host);
	}
}

/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @error: out parameter returns 0 for success, otherwise non zero
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for completion
 * of that request, then start the new one and return.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, NULL in case none completed.
 * Wait for an ongoing request (previously started) to complete and
 * return the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
		if (err == MMC_BLK_NEW_REQUEST) {
			if (error)
				*error = err;
			/*
			 * The previous request was not completed,
			 * nothing to return
			 */
			return NULL;
		}
		/*
		 * Check BKOPS urgency for each R1 response
		 */
		if (host->card && mmc_card_mmc(host->card) &&
		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT))
			mmc_start_bkops(host->card, true);
	}

	if (!err && areq)
		start_err = __mmc_start_data_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
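/*
 * Usage sketch (illustrative, mmc_blk-style pipelining; names are
 * hypothetical): callers hand in the next prepared async request so it
 * can be prepared and started while the previous one completes:
 *
 *	struct mmc_async_req *done;
 *	int error;
 *
 *	done = mmc_start_req(host, next_areq, &error);
 *	if (error == MMC_BLK_NEW_REQUEST)
 *		return;		// previous request still in flight
 *	// "done" is the previously started request, now completed;
 *	// next_areq is already running on the bus.
 */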
/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt, then poll the card status
 * until the card leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	BUG_ON(!card);

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
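/*
 * Usage sketch (the same pattern used by mmc_do_erase() later in this
 * file): build a bare command on the stack and issue it synchronously.
 * The host must already be claimed by the caller:
 *
 *	struct mmc_command cmd = {0};
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 */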
/**
 * mmc_stop_bkops - stop ongoing BKOPS
 * @card: MMC card to check BKOPS
 *
 * Send HPI command to stop ongoing background operations to
 * allow rapid servicing of foreground operations, e.g. read/
 * writes. Wait until the card comes out of the programming state
 * to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	BUG_ON(!card);
	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL we can't issue an HPI; the card will
	 * complete the BKOPS on its own.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);

int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	/*
	 * In future work, we should consider storing the entire ext_csd.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		pr_err("%s: could not allocate buffer to receive the ext_csd.\n",
		       mmc_hostname(card->host));
		return -ENOMEM;
	}

	mmc_claim_host(card->host);
	err = mmc_send_ext_csd(card, ext_csd);
	mmc_release_host(card->host);
	if (err)
		goto out;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
out:
	kfree(ext_csd);
	return err;
}
EXPORT_SYMBOL(mmc_read_bkops_status);
/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says: "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 300ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 300000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);

/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value, this will return prematurely with
 * that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (host->ops->enable && !stop && host->claim_cnt == 1)
		host->ops->enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_try_claim_host - try exclusively to claim a host
 * @host: mmc host to claim
 *
 * Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
		host->ops->enable(host);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);
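/*
 * Usage sketch (the same pattern used by mmc_start_bkops() and
 * mmc_read_bkops_status() in this file): bracket a series of card
 * operations with a claim/release pair so they execute atomically with
 * respect to other users of the host:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_send_status(card, &status);
 *	mmc_release_host(card->host);
 */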
/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release a MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	if (host->ops->disable && host->claim_cnt == 1)
		host->ops->disable(host);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_release_host);

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		BUG_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		__mmc_set_clock(host, host->clk_old);
	}
}

void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif
/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
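/*
 * Worked example (taken from the kernel-doc above): a host driver that
 * supports a 3.3 V - 3.4 V supply could populate its OCR mask with
 *
 *	host->ocr_avail = mmc_vddrange_to_ocrmask(3300, 3400);
 *
 * which evaluates to MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35,
 * since both boundary voltages get their OCR bits set.
 */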
#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
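/*
 * Usage sketch (per the mmc_regulator_set_ocr() kernel-doc below; the
 * "foo" names are hypothetical): a host driver would typically drive its
 * supply from its set_ios() method:
 *
 *	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		struct foo_host *foo = mmc_priv(mmc);
 *
 *		mmc_regulator_set_ocr(mmc, foo->vmmc, ios->vdd);
 *		...
 *	}
 */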
/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/*
		 * REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/*
		 * If we're using a fixed/static regulator, don't call
		 * regulator_set_voltage; it would fail.
		 */
		voltage = regulator_get_voltage(supply);

		if (!regulator_can_change_voltage(supply))
			min_uV = max_uV = voltage;

		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);

int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	struct regulator *supply;
	int ret;

	supply = devm_regulator_get(dev, "vmmc");
	mmc->supply.vmmc = supply;
	mmc->supply.vqmmc = devm_regulator_get(dev, "vqmmc");

	if (IS_ERR(supply))
		return PTR_ERR(supply);

	ret = mmc_regulator_get_ocrmask(supply);
	if (ret > 0)
		mmc->ocr_avail = ret;
	else
		dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);

#endif /* CONFIG_REGULATOR */

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		mmc_host_clk_hold(host);
		host->ios.vdd = bit;
		mmc_set_ios(host);
		mmc_host_clk_release(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}

int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;

	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}

	if (err)
		host->ios.signal_voltage = old_signal_voltage;

	return err;
}
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	struct mmc_command cmd = {0};
	int err = 0;
	u32 clock;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return __mmc_set_signal_voltage(host, signal_voltage);

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warning("%s: cannot verify signal voltage switch\n",
				mmc_hostname(host));

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;

	mmc_host_clk_hold(host);
	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}
	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (__mmc_set_signal_voltage(host, signal_voltage)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Keep clock gated for at least 5 ms */
	mmc_delay(5);
	host->ios.clock = clock;
	mmc_set_ios(host);

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			"power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host);
	}

	mmc_host_clk_release(host);

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_host_clk_hold(host);

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/* Set signal voltage to 3.3V */
	__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}

void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_host_clk_hold(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}

void mmc_power_cycle(struct mmc_host *host)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}

/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
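/*
 * Usage sketch (illustrative; the "foo" names are hypothetical): a host
 * driver with a card-detect GPIO would typically call mmc_detect_change()
 * from its interrupt handler, debouncing with a small delay:
 *
 *	static irqreturn_t foo_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */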
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card. That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time. For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value. For modern cards it
	 * will end up being 4MiB. Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}

static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target. The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
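/*
 * Worked example (using the values from the overflow comment above):
 * with tacc_ns = 80000000 and mult = 1280, the naive product
 * tacc_ns * mult (about 1.02e11) would overflow a 32-bit unsigned int,
 * so the code divides first:
 *
 *	timeout_us = (80000000 / 1000) * 1280 = 102400000 us
 *
 * i.e. a 102.4 s base timeout before the clock-derived term is added.
 */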
static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	unsigned long timeout;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2. For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case. SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands. Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}

		/* Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}

	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
	return err;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);
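/*
 * Usage sketch (grounded in the mmc_erase() kernel-doc above): callers
 * check capability, claim the host, then erase a sector range:
 *
 *	if (mmc_can_erase(card)) {
 *		mmc_claim_host(card->host);
 *		err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 *		mmc_release_host(card->host);
 *	}
 *
 * Note that an unaligned 'from'/'nr' is shrunk to erase-group
 * boundaries, so less than the requested range may be erased.
 */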
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
              unsigned int arg)
{
        unsigned int rem, to;

        if (!(card->host->caps & MMC_CAP_ERASE) ||
            !(card->csd.cmdclass & CCC_ERASE))
                return -EOPNOTSUPP;

        if (!card->erase_size)
                return -EOPNOTSUPP;

        if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
                return -EOPNOTSUPP;

        if ((arg & MMC_SECURE_ARGS) &&
            !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
                return -EOPNOTSUPP;

        if ((arg & MMC_TRIM_ARGS) &&
            !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
                return -EOPNOTSUPP;

        if (arg == MMC_SECURE_ERASE_ARG) {
                if (from % card->erase_size || nr % card->erase_size)
                        return -EINVAL;
        }

        if (arg == MMC_ERASE_ARG) {
                /* Shrink the request to erase-group alignment */
                rem = from % card->erase_size;
                if (rem) {
                        rem = card->erase_size - rem;
                        from += rem;
                        if (nr > rem)
                                nr -= rem;
                        else
                                return 0;
                }
                rem = nr % card->erase_size;
                if (rem)
                        nr -= rem;
        }

        if (nr == 0)
                return 0;

        to = from + nr;

        if (to <= from)
                return -EINVAL;

        /* 'from' and 'to' are inclusive */
        to -= 1;

        return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
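/*
 * Illustrative sketch (not part of the original file): a hypothetical caller
 * erasing a sector range with the generic erase argument. "card", "from",
 * "nr" and "err" are assumed to exist in the caller's context, and the host
 * must be claimed, per the kernel-doc above.
 */
#if 0   /* example only, not compiled */
        mmc_claim_host(card->host);
        err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
        mmc_release_host(card->host);
        /* err == 0 also covers the "request shrank to nothing" case above */
#endif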
int mmc_can_erase(struct mmc_card *card)
{
        if ((card->host->caps & MMC_CAP_ERASE) &&
            (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
        if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
        /*
         * As there's no way to detect the discard support bit at v4.5,
         * use the s/w feature support field.
         */
        if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
        if (!mmc_can_trim(card) && !mmc_can_erase(card))
                return 0;
        if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
        if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
                            unsigned int nr)
{
        if (!card->erase_size)
                return 0;
        if (from % card->erase_size || nr % card->erase_size)
                return 0;
        return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
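/*
 * Illustrative sketch (hypothetical helper, loosely modeled on what a block
 * driver might do with the capability tests above): pick the discard
 * argument that the card can actually honor. pick_discard_arg() is not a
 * real function in this file.
 */
#if 0   /* example only, not compiled */
static unsigned int pick_discard_arg(struct mmc_card *card,
                                     unsigned int from, unsigned int nr)
{
        if (mmc_can_trim(card))
                return MMC_TRIM_ARG;    /* trim needs no group alignment */
        if (mmc_can_erase(card) && mmc_erase_group_aligned(card, from, nr))
                return MMC_ERASE_ARG;
        return 0;                       /* hypothetical: caller skips discard */
}
#endif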
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
                                            unsigned int arg)
{
        struct mmc_host *host = card->host;
        unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
        unsigned int last_timeout = 0;

        if (card->erase_shift)
                max_qty = UINT_MAX >> card->erase_shift;
        else if (mmc_card_sd(card))
                max_qty = UINT_MAX;
        else
                max_qty = UINT_MAX / card->erase_size;

        /*
         * Find the largest qty with an OK timeout: double a trial step x
         * until the timeout would exceed the host limit or the computed
         * timeout wraps (timeout < last_timeout), bank the largest
         * acceptable step in y, and repeat from the new qty until no
         * further step fits.
         */
        do {
                y = 0;
                for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
                        timeout = mmc_erase_timeout(card, arg, qty + x);
                        if (timeout > host->max_discard_to)
                                break;
                        if (timeout < last_timeout)
                                break;
                        last_timeout = timeout;
                        y = x;
                }
                qty += y;
        } while (y);

        if (!qty)
                return 0;

        if (qty == 1)
                return 1;

        /* Convert qty to sectors */
        if (card->erase_shift)
                max_discard = --qty << card->erase_shift;
        else if (mmc_card_sd(card))
                max_discard = qty;
        else
                max_discard = --qty * card->erase_size;

        return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        unsigned int max_discard, max_trim;

        if (!host->max_discard_to)
                return UINT_MAX;

        /*
         * Without erase_group_def set, MMC erase timeout depends on clock
         * frequency which can change. In that case, the best choice is
         * just the preferred erase size.
         */
        if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
                return card->pref_erase;

        max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
        if (mmc_can_trim(card)) {
                max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
                if (max_trim < max_discard)
                        max_discard = max_trim;
        } else if (max_discard < card->erase_size) {
                max_discard = 0;
        }
        pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
                 mmc_hostname(host), max_discard, host->max_discard_to);
        return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
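/*
 * Illustrative sketch (hypothetical caller): how a block driver might apply
 * the result when configuring its request queue. "card" and "queue" are
 * assumed to come from the caller's context; blk_queue_max_discard_sectors()
 * is the block layer helper for capping discard request size.
 */
#if 0   /* example only, not compiled */
        unsigned int max_discard = mmc_calc_max_discard(card);

        if (max_discard)
                blk_queue_max_discard_sectors(queue, max_discard);
#endif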
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
        struct mmc_command cmd = {0};

        /* Block length is fixed at 512 bytes for block-addressed and DDR cards */
        if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
                return 0;

        cmd.opcode = MMC_SET_BLOCKLEN;
        cmd.arg = blocklen;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
                       bool is_rel_write)
{
        struct mmc_command cmd = {0};

        /* CMD23: bits [15:0] carry the block count, bit 31 flags a reliable write */
        cmd.opcode = MMC_SET_BLOCK_COUNT;
        cmd.arg = blockcount & 0x0000FFFF;
        if (is_rel_write)
                cmd.arg |= 1 << 31;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);
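/*
 * Illustrative sketch (hypothetical values): predefining the transfer length
 * before a multi-block write, so the transfer ends after "nr_blocks" blocks
 * without a stop command. "card", "nr_blocks" and "err" are assumed to come
 * from the caller's context.
 */
#if 0   /* example only, not compiled */
        err = mmc_set_blockcount(card, nr_blocks, false);
        if (!err)
                /* ...start the CMD25 data request for nr_blocks blocks... */;
#endif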
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
        if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
                return;
        mmc_host_clk_hold(host);
        host->ops->hw_reset(host);
        mmc_host_clk_release(host);
}

int mmc_can_reset(struct mmc_card *card)
{
        u8 rst_n_function;

        if (!mmc_card_mmc(card))
                return 0;
        rst_n_function = card->ext_csd.rst_n_function;
        if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
                return 0;
        return 1;
}
EXPORT_SYMBOL(mmc_can_reset);

static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
        struct mmc_card *card = host->card;

        if (!host->bus_ops->power_restore)
                return -EOPNOTSUPP;

        if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
                return -EOPNOTSUPP;

        if (!card)
                return -EINVAL;

        if (!mmc_can_reset(card))
                return -EOPNOTSUPP;

        mmc_host_clk_hold(host);
        mmc_set_clock(host, host->f_init);

        host->ops->hw_reset(host);

        /* If the reset has happened, then a status command will fail */
        if (check) {
                struct mmc_command cmd = {0};
                int err;

                cmd.opcode = MMC_SEND_STATUS;
                if (!mmc_host_is_spi(card->host))
                        cmd.arg = card->rca << 16;
                cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
                err = mmc_wait_for_cmd(card->host, &cmd, 0);
                if (!err) {
                        mmc_host_clk_release(host);
                        return -ENOSYS;
                }
        }

        host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
        if (mmc_host_is_spi(host)) {
                host->ios.chip_select = MMC_CS_HIGH;
                host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
        } else {
                host->ios.chip_select = MMC_CS_DONTCARE;
                host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
        }
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);

        mmc_host_clk_release(host);

        return host->bus_ops->power_restore(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
        return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);

int mmc_hw_reset_check(struct mmc_host *host)
{
        return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);
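/*
 * Illustrative sketch (hypothetical error-recovery path): distinguishing the
 * outcomes of a checked reset. Per the code above, -ENOSYS specifically
 * means the card still answered CMD13, i.e. the reset line had no effect.
 * "host" and "err" are assumed to come from the caller's context.
 */
#if 0   /* example only, not compiled */
        err = mmc_hw_reset_check(host);
        if (err == -EOPNOTSUPP)
                /* no usable RST_n support: fall back to a full power cycle */;
        else if (err == -ENOSYS)
                /* reset line is wired but did not actually reset the card */;
#endif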
"on" : "off", 2611 err); 2612 else 2613 card->ext_csd.cache_ctrl = enable; 2614 } 2615 } 2616 2617 return err; 2618 } 2619 EXPORT_SYMBOL(mmc_cache_ctrl); 2620 2621 #ifdef CONFIG_PM 2622 2623 /** 2624 * mmc_suspend_host - suspend a host 2625 * @host: mmc host 2626 */ 2627 int mmc_suspend_host(struct mmc_host *host) 2628 { 2629 int err = 0; 2630 2631 cancel_delayed_work(&host->detect); 2632 mmc_flush_scheduled_work(); 2633 2634 mmc_bus_get(host); 2635 if (host->bus_ops && !host->bus_dead) { 2636 if (host->bus_ops->suspend) { 2637 if (mmc_card_doing_bkops(host->card)) { 2638 err = mmc_stop_bkops(host->card); 2639 if (err) 2640 goto out; 2641 } 2642 err = host->bus_ops->suspend(host); 2643 } 2644 2645 if (err == -ENOSYS || !host->bus_ops->resume) { 2646 /* 2647 * We simply "remove" the card in this case. 2648 * It will be redetected on resume. (Calling 2649 * bus_ops->remove() with a claimed host can 2650 * deadlock.) 2651 */ 2652 if (host->bus_ops->remove) 2653 host->bus_ops->remove(host); 2654 mmc_claim_host(host); 2655 mmc_detach_bus(host); 2656 mmc_power_off(host); 2657 mmc_release_host(host); 2658 host->pm_flags = 0; 2659 err = 0; 2660 } 2661 } 2662 mmc_bus_put(host); 2663 2664 if (!err && !mmc_card_keep_power(host)) 2665 mmc_power_off(host); 2666 2667 out: 2668 return err; 2669 } 2670 2671 EXPORT_SYMBOL(mmc_suspend_host); 2672 2673 /** 2674 * mmc_resume_host - resume a previously suspended host 2675 * @host: mmc host 2676 */ 2677 int mmc_resume_host(struct mmc_host *host) 2678 { 2679 int err = 0; 2680 2681 mmc_bus_get(host); 2682 if (host->bus_ops && !host->bus_dead) { 2683 if (!mmc_card_keep_power(host)) { 2684 mmc_power_up(host); 2685 mmc_select_voltage(host, host->ocr); 2686 /* 2687 * Tell runtime PM core we just powered up the card, 2688 * since it still believes the card is powered off. 2689 * Note that currently runtime PM is only enabled 2690 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD 2691 */ 2692 if (mmc_card_sdio(host->card) && 2693 (host->caps & MMC_CAP_POWER_OFF_CARD)) { 2694 pm_runtime_disable(&host->card->dev); 2695 pm_runtime_set_active(&host->card->dev); 2696 pm_runtime_enable(&host->card->dev); 2697 } 2698 } 2699 BUG_ON(!host->bus_ops->resume); 2700 err = host->bus_ops->resume(host); 2701 if (err) { 2702 pr_warning("%s: error %d during resume " 2703 "(card was removed?)\n", 2704 mmc_hostname(host), err); 2705 err = 0; 2706 } 2707 } 2708 host->pm_flags &= ~MMC_PM_KEEP_POWER; 2709 mmc_bus_put(host); 2710 2711 return err; 2712 } 2713 EXPORT_SYMBOL(mmc_resume_host); 2714 2715 /* Do the card removal on suspend if card is assumed removeable 2716 * Do that in pm notifier while userspace isn't yet frozen, so we will be able 2717 to sync the card. 
void mmc_rescan(struct work_struct *work)
{
        struct mmc_host *host =
                container_of(work, struct mmc_host, detect.work);
        int i;

        if (host->rescan_disable)
                return;

        /* If there is a non-removable card registered, only scan once */
        if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
                return;
        host->rescan_entered = 1;

        mmc_bus_get(host);

        /*
         * If there is a _removable_ card registered, check whether it is
         * still present.
         */
        if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
            && !(host->caps & MMC_CAP_NONREMOVABLE))
                host->bus_ops->detect(host);

        host->detect_change = 0;

        /*
         * Let mmc_bus_put() free the bus/bus_ops if we've found that
         * the card is no longer present.
         */
        mmc_bus_put(host);
        mmc_bus_get(host);

        /* If there still is a card present, stop here */
        if (host->bus_ops != NULL) {
                mmc_bus_put(host);
                goto out;
        }

        /*
         * Only we can add a new handler, so it's safe to
         * release the lock here.
         */
        mmc_bus_put(host);

        if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
                mmc_claim_host(host);
                mmc_power_off(host);
                mmc_release_host(host);
                goto out;
        }

        mmc_claim_host(host);
        for (i = 0; i < ARRAY_SIZE(freqs); i++) {
                if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
                        break;
                if (freqs[i] <= host->f_min)
                        break;
        }
        mmc_release_host(host);

out:
        if (host->caps & MMC_CAP_NEEDS_POLL)
                mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
        host->f_init = max(freqs[0], host->f_min);
        host->rescan_disable = 0;
        if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
                mmc_power_off(host);
        else
                mmc_power_up(host);
        mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned long flags;
        spin_lock_irqsave(&host->lock, flags);
        host->removed = 1;
        spin_unlock_irqrestore(&host->lock, flags);
#endif

        host->rescan_disable = 1;
        cancel_delayed_work_sync(&host->detect);
        mmc_flush_scheduled_work();

        /* clear pm flags now and let card drivers set them as needed */
        host->pm_flags = 0;

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                /* Calling bus_ops->remove() with a claimed host can deadlock */
                if (host->bus_ops->remove)
                        host->bus_ops->remove(host);

                mmc_claim_host(host);
                mmc_detach_bus(host);
                mmc_power_off(host);
                mmc_release_host(host);
                mmc_bus_put(host);
                return;
        }
        mmc_bus_put(host);

        BUG_ON(host->card);

        mmc_power_off(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
        int ret = 0;

#ifdef CONFIG_MMC_DEBUG
        pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

        mmc_bus_get(host);

        if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
                mmc_bus_put(host);
                return -EINVAL;
        }

        if (host->bus_ops->power_save)
                ret = host->bus_ops->power_save(host);

        mmc_bus_put(host);

        mmc_power_off(host);

        return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
        int ret;

#ifdef CONFIG_MMC_DEBUG
        pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

        mmc_bus_get(host);

        if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
                mmc_bus_put(host);
                return -EINVAL;
        }

        mmc_power_up(host);
        ret = host->bus_ops->power_restore(host);

        mmc_bus_put(host);

        return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

int mmc_card_awake(struct mmc_host *host)
{
        int err = -ENOSYS;

        if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
                return 0;

        mmc_bus_get(host);

        if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
                err = host->bus_ops->awake(host);

        mmc_bus_put(host);

        return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
        int err = -ENOSYS;

        if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
                return 0;

        mmc_bus_get(host);

        if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
                err = host->bus_ops->sleep(host);

        mmc_bus_put(host);

        return err;
}
EXPORT_SYMBOL(mmc_card_sleep);

int mmc_card_can_sleep(struct mmc_host *host)
{
        struct mmc_card *card = host->card;

        if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        int err = 0;

        if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
                return err;

        if (mmc_card_mmc(card) &&
            (card->ext_csd.cache_size > 0) &&
            (card->ext_csd.cache_ctrl & 1)) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_FLUSH_CACHE, 1, 0);
                if (err)
                        pr_err("%s: cache flush error %d\n",
                               mmc_hostname(card->host), err);
        }

        return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 * This function should be called with the host claimed.
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
        struct mmc_card *card = host->card;
        unsigned int timeout;
        int err = 0;

        if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
            mmc_card_is_removable(host))
                return err;

        if (card && mmc_card_mmc(card) &&
            (card->ext_csd.cache_size > 0)) {
                enable = !!enable;

                if (card->ext_csd.cache_ctrl ^ enable) {
                        timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         EXT_CSD_CACHE_CTRL, enable, timeout);
                        if (err)
                                pr_err("%s: cache %s error %d\n",
                                       mmc_hostname(card->host),
                                       enable ? "on" : "off", err);
                        else
                                card->ext_csd.cache_ctrl = enable;
                }
        }

        return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);

#ifdef CONFIG_PM

/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
        int err = 0;

        cancel_delayed_work(&host->detect);
        mmc_flush_scheduled_work();

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                if (host->bus_ops->suspend) {
                        if (mmc_card_doing_bkops(host->card)) {
                                err = mmc_stop_bkops(host->card);
                                if (err)
                                        goto out;
                        }
                        err = host->bus_ops->suspend(host);
                }

                if (err == -ENOSYS || !host->bus_ops->resume) {
                        /*
                         * We simply "remove" the card in this case.
                         * It will be redetected on resume. (Calling
                         * bus_ops->remove() with a claimed host can
                         * deadlock.)
                         */
                        if (host->bus_ops->remove)
                                host->bus_ops->remove(host);
                        mmc_claim_host(host);
                        mmc_detach_bus(host);
                        mmc_power_off(host);
                        mmc_release_host(host);
                        host->pm_flags = 0;
                        err = 0;
                }
        }
        mmc_bus_put(host);

        if (!err && !mmc_card_keep_power(host))
                mmc_power_off(host);

out:
        return err;
}

EXPORT_SYMBOL(mmc_suspend_host);

/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
        int err = 0;

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                if (!mmc_card_keep_power(host)) {
                        mmc_power_up(host);
                        mmc_select_voltage(host, host->ocr);
                        /*
                         * Tell the runtime PM core we just powered up the
                         * card, since it still believes the card is powered
                         * off. Note that currently runtime PM is only enabled
                         * for SDIO cards on hosts with MMC_CAP_POWER_OFF_CARD
                         * set.
                         */
                        if (mmc_card_sdio(host->card) &&
                            (host->caps & MMC_CAP_POWER_OFF_CARD)) {
                                pm_runtime_disable(&host->card->dev);
                                pm_runtime_set_active(&host->card->dev);
                                pm_runtime_enable(&host->card->dev);
                        }
                }
                BUG_ON(!host->bus_ops->resume);
                err = host->bus_ops->resume(host);
                if (err) {
                        pr_warning("%s: error %d during resume (card was removed?)\n",
                                   mmc_hostname(host), err);
                        err = 0;
                }
        }
        host->pm_flags &= ~MMC_PM_KEEP_POWER;
        mmc_bus_put(host);

        return err;
}
EXPORT_SYMBOL(mmc_resume_host);

/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in a pm notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
                  unsigned long mode, void *unused)
{
        struct mmc_host *host = container_of(
                notify_block, struct mmc_host, pm_notify);
        unsigned long flags;
        int err = 0;

        switch (mode) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                if (host->card && mmc_card_mmc(host->card) &&
                    mmc_card_doing_bkops(host->card)) {
                        err = mmc_stop_bkops(host->card);
                        if (err) {
                                pr_err("%s: didn't stop bkops\n",
                                       mmc_hostname(host));
                                return err;
                        }
                        mmc_card_clr_doing_bkops(host->card);
                }

                spin_lock_irqsave(&host->lock, flags);
                host->rescan_disable = 1;
                spin_unlock_irqrestore(&host->lock, flags);
                cancel_delayed_work_sync(&host->detect);

                if (!host->bus_ops || host->bus_ops->suspend)
                        break;

                /* Calling bus_ops->remove() with a claimed host can deadlock */
                if (host->bus_ops->remove)
                        host->bus_ops->remove(host);

                mmc_claim_host(host);
                mmc_detach_bus(host);
                mmc_power_off(host);
                mmc_release_host(host);
                host->pm_flags = 0;
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
        case PM_POST_RESTORE:

                spin_lock_irqsave(&host->lock, flags);
                host->rescan_disable = 0;
                spin_unlock_irqrestore(&host->lock, flags);
                mmc_detect_change(host, 0);

        }

        return 0;
}
#endif

/**
 * mmc_init_context_info() - init synchronization context
 * @host: mmc host
 *
 * Init struct context_info needed to implement the asynchronous
 * request mechanism, used by the mmc core, host drivers and mmc
 * request suppliers.
 */
void mmc_init_context_info(struct mmc_host *host)
{
        spin_lock_init(&host->context_info.lock);
        host->context_info.is_new_req = false;
        host->context_info.is_done_rcv = false;
        host->context_info.is_waiting_last_req = false;
        init_waitqueue_head(&host->context_info.wait);
}

static int __init mmc_init(void)
{
        int ret;

        workqueue = alloc_ordered_workqueue("kmmcd", 0);
        if (!workqueue)
                return -ENOMEM;

        ret = mmc_register_bus();
        if (ret)
                goto destroy_workqueue;

        ret = mmc_register_host_class();
        if (ret)
                goto unregister_bus;

        ret = sdio_register_bus();
        if (ret)
                goto unregister_host_class;

        return 0;

unregister_host_class:
        mmc_unregister_host_class();
unregister_bus:
        mmc_unregister_bus();
destroy_workqueue:
        destroy_workqueue(workqueue);

        return ret;
}

static void __exit mmc_exit(void)
{
        sdio_unregister_bus();
        mmc_unregister_host_class();
        mmc_unregister_bus();
        destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");