/*
 * linux/drivers/mmc/core/mmc_ops.c
 *
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "mmc_ops.h"

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!host);

        cmd.opcode = MMC_SELECT_CARD;

        if (card) {
                cmd.arg = card->rca << 16;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
        } else {
                cmd.arg = 0;
                cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
        }

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        return 0;
}

int mmc_select_card(struct mmc_card *card)
{
        BUG_ON(!card);

        return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
        return _mmc_select_card(host, NULL);
}

int mmc_card_sleepawake(struct mmc_host *host, int sleep)
{
        struct mmc_command cmd = {0};
        struct mmc_card *card = host->card;
        int err;

        if (sleep)
                mmc_deselect_cards(host);

        cmd.opcode = MMC_SLEEP_AWAKE;
        cmd.arg = card->rca << 16;
        if (sleep)
                cmd.arg |= 1 << 15;

        cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (err)
                return err;

        /*
         * If the host does not wait while the card signals busy, then we
         * will have to wait the sleep/awake timeout.  Note, we cannot use
         * the SEND_STATUS command to poll the status because that command
         * (and most others) is invalid while the card sleeps.
         * (ext_csd.sa_timeout is in units of 100ns, so dividing by 10000
         * converts it to the millisecond delay mmc_delay() expects.)
         */
        if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
                mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));

        if (!sleep)
                err = mmc_select_card(card);

        return err;
}

int mmc_go_idle(struct mmc_host *host)
{
        int err;
        struct mmc_command cmd = {0};

        /*
         * Non-SPI hosts need to prevent chipselect going active during
         * GO_IDLE; that would put chips into SPI mode.  Remind them of
         * that in case of hardware that won't pull up DAT3/nCS otherwise.
         *
         * SPI hosts ignore ios.chip_select; it's managed according to
         * rules that must accommodate non-MMC slaves which this layer
         * won't even know about.
         */
        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_HIGH);
                mmc_delay(1);
        }

        cmd.opcode = MMC_GO_IDLE_STATE;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        mmc_delay(1);

        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_DONTCARE);
                mmc_delay(1);
        }

        host->use_spi_crc = 0;

        return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
        struct mmc_command cmd = {0};
        int i, err = 0;

        BUG_ON(!host);

        cmd.opcode = MMC_SEND_OP_COND;
        cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

        for (i = 100; i; i--) {
                err = mmc_wait_for_cmd(host, &cmd, 0);
                if (err)
                        break;

                /* if we're just probing, do a single pass */
                if (ocr == 0)
                        break;

                /* otherwise wait until reset completes */
                if (mmc_host_is_spi(host)) {
                        if (!(cmd.resp[0] & R1_SPI_IDLE))
                                break;
                } else {
                        if (cmd.resp[0] & MMC_CARD_BUSY)
                                break;
                }

                err = -ETIMEDOUT;

                mmc_delay(10);
        }

        if (rocr && !mmc_host_is_spi(host))
                *rocr = cmd.resp[0];

        return err;
}

int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!host);
        BUG_ON(!cid);

        cmd.opcode = MMC_ALL_SEND_CID;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        memcpy(cid, cmd.resp, sizeof(u32) * 4);

        return 0;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!card);
        BUG_ON(!card->host);

        cmd.opcode = MMC_SET_RELATIVE_ADDR;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        return 0;
}
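
/*
 * Illustrative sketch only, not part of this file's API: the helpers above
 * are what the core sequences during native-mode MMC enumeration.  The
 * authoritative flow lives in mmc_init_card() in drivers/mmc/core/mmc.c;
 * roughly:
 *
 *	mmc_go_idle(host);			CMD0: reset the card to idle state
 *	mmc_send_op_cond(host, ocr, &rocr);	CMD1: negotiate voltage, poll until ready
 *	mmc_all_send_cid(host, cid);		CMD2: fetch the card identification data
 *	card->rca = 1;				the host picks an RCA for MMC cards
 *	mmc_set_relative_addr(card);		CMD3: program that RCA into the card
 */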

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!host);
        BUG_ON(!cxd);

        cmd.opcode = opcode;
        cmd.arg = arg;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        memcpy(cxd, cmd.resp, sizeof(u32) * 4);

        return 0;
}

static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
                u32 opcode, void *buf, unsigned len)
{
        struct mmc_request mrq = {0};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct scatterlist sg;
        void *data_buf;

        /* dma onto stack is unsafe/nonportable, but callers to this
         * routine normally provide temporary on-stack buffers ...
         */
        data_buf = kmalloc(len, GFP_KERNEL);
        if (data_buf == NULL)
                return -ENOMEM;

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = opcode;
        cmd.arg = 0;

        /* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
         * rely on callers to never use this with "native" calls for reading
         * CSD or CID.  Native versions of those commands use the R2 type,
         * not R1 plus a data block.
         */
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = len;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        sg_init_one(&sg, data_buf, len);

        if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
                /*
                 * The spec states that CSD and CID accesses have a timeout
                 * of 64 clock cycles.
                 */
                data.timeout_ns = 0;
                data.timeout_clks = 64;
        } else
                mmc_set_data_timeout(&data, card);

        mmc_wait_for_req(host, &mrq);

        memcpy(buf, data_buf, len);
        kfree(data_buf);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return 0;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
        int ret, i;

        if (!mmc_host_is_spi(card->host))
                return mmc_send_cxd_native(card->host, card->rca << 16,
                                csd, MMC_SEND_CSD);

        ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16);
        if (ret)
                return ret;

        for (i = 0; i < 4; i++)
                csd[i] = be32_to_cpu(csd[i]);

        return 0;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
        int ret, i;

        if (!mmc_host_is_spi(host)) {
                if (!host->card)
                        return -EINVAL;
                return mmc_send_cxd_native(host, host->card->rca << 16,
                                cid, MMC_SEND_CID);
        }

        ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16);
        if (ret)
                return ret;

        for (i = 0; i < 4; i++)
                cid[i] = be32_to_cpu(cid[i]);

        return 0;
}

int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
        return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
                        ext_csd, 512);
}

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SPI_READ_OCR;
        cmd.arg = highcap ? (1 << 30) : 0;
        cmd.flags = MMC_RSP_SPI_R3;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        *ocrp = cmd.resp[1];
        return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SPI_CRC_ON_OFF;
        cmd.flags = MMC_RSP_SPI_R1;
        cmd.arg = use_crc;

        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (!err)
                host->use_spi_crc = use_crc;
        return err;
}

/**
 * mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write;
 *              a timeout of zero implies the maximum possible timeout
 *
 * Modifies the EXT_CSD register for the selected card.
 */
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
               unsigned int timeout_ms)
{
        int err;
        struct mmc_command cmd = {0};
        u32 status;

        BUG_ON(!card);
        BUG_ON(!card->host);

        cmd.opcode = MMC_SWITCH;
        cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
                  (index << 16) |
                  (value << 8) |
                  set;
        cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
        cmd.cmd_timeout_ms = timeout_ms;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        /* Must check status to be sure of no errors */
        do {
                err = mmc_send_status(card, &status);
                if (err)
                        return err;
                if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
                        break;
                if (mmc_host_is_spi(card->host))
                        break;
        } while (R1_CURRENT_STATE(status) == 7);	/* 7: programming state */

        if (mmc_host_is_spi(card->host)) {
                if (status & R1_SPI_ILLEGAL_COMMAND)
                        return -EBADMSG;
        } else {
                if (status & 0xFDFFA000)
                        printk(KERN_WARNING "%s: unexpected status %#x after "
                               "switch\n", mmc_hostname(card->host), status);
                if (status & R1_SWITCH_ERROR)
                        return -EBADMSG;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mmc_switch);
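
/*
 * Illustrative example only: a typical mmc_switch() call programs a single
 * EXT_CSD byte, e.g. selecting a 4-bit data bus.  EXT_CSD_CMD_SET_NORMAL,
 * EXT_CSD_BUS_WIDTH and EXT_CSD_BUS_WIDTH_4 are the standard constants from
 * <linux/mmc/mmc.h>; per the kerneldoc above, a timeout_ms of 0 requests the
 * maximum possible timeout:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 *			 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_4, 0);
 *	if (err)
 *		return err;
 */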

int mmc_send_status(struct mmc_card *card, u32 *status)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!card);
        BUG_ON(!card->host);

        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        /* NOTE: callers are required to understand the difference
         * between "native" and SPI format status words!
         */
        if (status)
                *status = cmd.resp[0];

        return 0;
}

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
                  u8 len)
{
        struct mmc_request mrq = {0};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct scatterlist sg;
        u8 *data_buf;
        u8 *test_buf;
        int i, err;
        static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
        static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

        /* dma onto stack is unsafe/nonportable, but callers to this
         * routine normally provide temporary on-stack buffers ...
         */
        data_buf = kmalloc(len, GFP_KERNEL);
        if (!data_buf)
                return -ENOMEM;

        if (len == 8)
                test_buf = testdata_8bit;
        else if (len == 4)
                test_buf = testdata_4bit;
        else {
                printk(KERN_ERR "%s: Invalid bus_width %d\n",
                       mmc_hostname(host), len);
                kfree(data_buf);
                return -EINVAL;
        }

        if (opcode == MMC_BUS_TEST_W)
                memcpy(data_buf, test_buf, len);

        mrq.cmd = &cmd;
        mrq.data = &data;
        cmd.opcode = opcode;
        cmd.arg = 0;

        /* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
         * rely on callers to never use this with "native" calls for reading
         * CSD or CID.  Native versions of those commands use the R2 type,
         * not R1 plus a data block.
         */
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = len;
        data.blocks = 1;
        if (opcode == MMC_BUS_TEST_R)
                data.flags = MMC_DATA_READ;
        else
                data.flags = MMC_DATA_WRITE;

        data.sg = &sg;
        data.sg_len = 1;
        sg_init_one(&sg, data_buf, len);
        mmc_wait_for_req(host, &mrq);
        err = 0;
        if (opcode == MMC_BUS_TEST_R) {
                /*
                 * BUS_TEST_R returns the logical inverse of the pattern
                 * written by BUS_TEST_W, so each valid byte XORed with the
                 * test pattern must read back as 0xff.
                 */
                for (i = 0; i < len / 4; i++)
                        if ((test_buf[i] ^ data_buf[i]) != 0xff) {
                                err = -EIO;
                                break;
                        }
        }
        kfree(data_buf);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
        int err, width;

        if (bus_width == MMC_BUS_WIDTH_8)
                width = 8;
        else if (bus_width == MMC_BUS_WIDTH_4)
                width = 4;
        else if (bus_width == MMC_BUS_WIDTH_1)
                return 0; /* no need for test */
        else
                return -EINVAL;

        /*
         * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
         * is a problem.  This improves chances that the test will work.
         */
        mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
        err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
        return err;
}
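
/*
 * Rough, non-authoritative sketch of how mmc_bus_test() is meant to be used:
 * after mmc_switch() programs a wider bus and the host is reconfigured with
 * mmc_set_bus_width(), a host advertising MMC_CAP_BUS_WIDTH_TEST can verify
 * the new width and fall back to a narrower one on failure.  See the
 * bus-width selection loop in mmc_init_card() (drivers/mmc/core/mmc.c) for
 * the authoritative version:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 *			 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_8, 0);
 *	if (!err) {
 *		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_8);
 *		err = mmc_bus_test(card, MMC_BUS_WIDTH_8);
 *	}
 *	if (err)
 *		retry with EXT_CSD_BUS_WIDTH_4 / MMC_BUS_WIDTH_4 instead
 */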