// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/core/mmc_ops.c
 *
 * Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS		(10 * 60 * 1000) /* 10 min */
#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30s */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);
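/*
 * Example (illustrative sketch, not part of this file's API): a caller
 * that needs to know whether the card is still programming would
 * typically decode the "native" R1 status returned above, e.g.:
 *
 *	u32 status;
 *	int err = mmc_send_status(card, &status);
 *
 *	if (!err && R1_CURRENT_STATE(status) == R1_STATE_PRG)
 *		;	// card is busy programming, poll again later
 *
 * For SPI hosts the response uses the SPI status format instead, so this
 * decoding applies to native mode only; mmc_poll_for_busy() below does
 * exactly this.
 */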
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until the
		 * eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive state after mmc_init_card() has issued CMD0
		 * while the device is still busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
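/*
 * Example (illustrative sketch): during card initialization the core
 * first probes the card's OCR by sending CMD1 with a zero argument, then
 * negotiates the voltage window, roughly:
 *
 *	u32 ocr;
 *
 *	mmc_go_idle(host);
 *	mmc_send_op_cond(host, 0, &ocr);	// query the OCR, arg = 0
 *	// ... intersect ocr with the host's voltage capabilities ...
 *	mmc_send_op_cond(host, ocr | BIT(30), &ocr); // request sector mode
 *
 * The real sequence lives in the core attach/init path; this only shows
 * how the two calls relate, with BIT(30) being the sector-addressing
 * (access mode) bit also used in the retry loop above.
 */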
int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller must pass either a DMA-capable buffer or an on-stack
 * buffer in @buf (the latter incurs some bounce overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_csd(card, csd);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}

static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cid(host, cid);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
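/*
 * Example (illustrative sketch): mmc_get_ext_csd() allocates the 512 byte
 * EXT_CSD block on behalf of the caller, who owns and must free it, e.g.:
 *
 *	u8 *ext_csd;
 *	int err = mmc_get_ext_csd(card, &ext_csd);
 *
 *	if (!err) {
 *		u8 rev = ext_csd[EXT_CSD_REV];	// pick out fields of interest
 *		kfree(ext_csd);
 *	}
 *
 * mmc_read_bkops_status() below follows exactly this pattern.
 */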
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

int mmc_switch_status(struct mmc_card *card)
{
	return __mmc_switch_status(card, true);
}

static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/*
	 * If we aren't allowed to poll using CMD13, and the host isn't
	 * capable of polling via ->card_busy(), rely on waiting out the
	 * stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}
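/*
 * Worked example (informational): the CMD6 argument built by
 * __mmc_switch() below packs mode, register index, value and command set
 * into one 32 bit word:
 *
 *	[25:24] access mode (MMC_SWITCH_MODE_WRITE_BYTE here)
 *	[23:16] EXT_CSD byte index
 *	[15:8]  value to write
 *	[2:0]   command set
 *
 * So switching HS_TIMING (EXT_CSD index 185) to value 1 yields
 * (0x3 << 24) | (185 << 16) | (1 << 8) | 0 = 0x03b90100.
 */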
/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *              timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @use_busy_signal: use the busy signal as response type
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry if CRC errors occur when polling with CMD13 for busy
 *
 * Modifies the EXT_CSD register for the selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B.
	 */
	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/* If SPI, or if HW busy detection was used above, we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to the new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
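/*
 * Example (illustrative sketch): most EXT_CSD writes in this file go
 * through mmc_switch(). Enabling the volatile cache, for instance, looks
 * roughly like:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 *			 EXT_CSD_CACHE_CTRL, 1,
 *			 card->ext_csd.generic_cmd6_time);
 *
 * mmc_flush_cache() and mmc_cmdq_switch() below are real callers with
 * the same shape.
 */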
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally
	 * completes in fewer than 40 executions of CMD19, and the timeout
	 * value should be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification specifies that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);
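/*
 * Example (illustrative sketch): a host driver typically calls
 * mmc_send_tuning() from its ->execute_tuning() callback while stepping
 * through its sample-delay settings, roughly:
 *
 *	for (tap = 0; tap < NUM_TAPS; tap++) {	// NUM_TAPS is hypothetical
 *		set_sample_delay(host, tap);	// driver-specific knob
 *		if (!mmc_send_tuning(mmc, opcode, NULL))
 *			mark_tap_good(tap);
 *	}
 *	// then pick the middle of the largest good window
 *
 * The helpers above are made-up names; only mmc_send_tuning() is real.
 */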
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* DMA onto the stack is unsafe/nonportable, so use a kmalloc'd
	 * bounce buffer for the transfer instead.
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
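/*
 * Worked example (informational): for BUS_TEST_W the host sends the
 * pattern above (e.g. 0x55 = 01010101b) on the data lines; per the eMMC
 * bus-testing procedure the card responds to BUS_TEST_R with the
 * inverted pattern on every correctly wired line, so a fault-free bus
 * satisfies
 *
 *	(test_buf[i] ^ data_buf[i]) == 0xff
 *
 * for each byte checked above. Any other result means at least one DAT
 * line is stuck or miswired.
 */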
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. HPI command response %#x\n",
			mmc_hostname(card->host), err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}

/**
 * mmc_interrupt_hpi - issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issues a High Priority Interrupt, then polls the card status until it
 * leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards having manual BKOPS
 * enabled and in case it reports urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. Future wise, we may consider to start BKOPS, for less
	 * urgent levels by using an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);
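/*
 * Example (illustrative sketch): mmc_run_bkops() is meant to be called
 * from a context that knows the card is otherwise idle, e.g. a block
 * driver might do:
 *
 *	mmc_get_card(card, NULL);	// claim the host first
 *	mmc_run_bkops(card);
 *	mmc_put_card(card, NULL);
 *
 * The claim/release pairing is the caller's responsibility; the helper
 * itself only holds re-tuning.
 */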
/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1,
				 MMC_CACHE_FLUSH_TIMEOUT_MS);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);