/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "mmc_ops.h"

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	int err;
	struct mmc_command cmd;

	BUG_ON(!host);

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	return 0;
}

int mmc_select_card(struct mmc_card *card)
{
	BUG_ON(!card);

	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

int mmc_card_sleepawake(struct mmc_host *host, int sleep)
{
	struct mmc_command cmd;
	struct mmc_card *card = host->card;
	int err;

	if (sleep)
		mmc_deselect_cards(host);

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_SLEEP_AWAKE;
	cmd.arg = card->rca << 16;
	if (sleep)
		cmd.arg |= 1 << 15;

	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	/*
	 * If the host does not wait while the card signals busy, then we will
	 * have to wait the sleep/awake timeout. Note, we cannot use the
	 * SEND_STATUS command to poll the status because that command (and most
	 * others) is invalid while the card sleeps.
	 */
	if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
		/* sa_timeout is in 100ns units; convert to ms */
		mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));

	if (!sleep)
		err = mmc_select_card(card);

	return err;
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd;

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd;
	int i, err = 0;

	BUG_ON(!host);

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
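
/*
 * Illustrative only: a minimal sketch of how mmc_go_idle() and
 * mmc_send_op_cond() are typically combined at attach time, roughly in
 * the spirit of the core's rescan/attach path.  CMD0 resets the card, a
 * zero-OCR CMD1 probes the supported voltage range in a single pass, and
 * a second CMD1 with the chosen OCR is then polled until power-up
 * completes.  The helper name example_mmc_reset_and_negotiate() is an
 * assumption for illustration; mmc_select_voltage() comes from core.h.
 */
static int __maybe_unused example_mmc_reset_and_negotiate(struct mmc_host *host)
{
	u32 ocr, rocr;
	int err;

	mmc_go_idle(host);			/* CMD0: back to idle state */

	err = mmc_send_op_cond(host, 0, &ocr);	/* probe pass: ocr == 0 */
	if (err)
		return err;

	ocr = mmc_select_voltage(host, ocr);	/* pick a window we can drive */
	if (!ocr)
		return -EINVAL;

	/* negotiate voltage and poll until the card reports power-up done */
	return mmc_send_op_cond(host, ocr, &rocr);
}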

int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
	int err;
	struct mmc_command cmd;

	BUG_ON(!host);
	BUG_ON(!cid);

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_ALL_SEND_CID;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cid, cmd.resp, sizeof(u32) * 4);

	return 0;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	int err;
	struct mmc_command cmd;

	BUG_ON(!card);
	BUG_ON(!card->host);

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	return 0;
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd;

	BUG_ON(!host);
	BUG_ON(!cxd);

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	struct scatterlist sg;
	void *data_buf;

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (data_buf == NULL)
		return -ENOMEM;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	memcpy(buf, data_buf, len);
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;

	if (!mmc_host_is_spi(card->host))
		return mmc_send_cxd_native(card->host, card->rca << 16,
				csd, MMC_SEND_CSD);

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd[i]);

	return 0;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;

	if (!mmc_host_is_spi(host)) {
		if (!host->card)
			return -EINVAL;
		return mmc_send_cxd_native(host, host->card->rca << 16,
				cid, MMC_SEND_CID);
	}

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid[i]);

	return 0;
}

int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
			ext_csd, 512);
}
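
/*
 * Illustrative only: a minimal sketch of how mmc_send_ext_csd() is
 * typically consumed.  The EXT_CSD register is always 512 bytes, too
 * large to keep on the stack, so it is kmalloc'd here just like the
 * core's own EXT_CSD reader does, and fields are plain byte offsets
 * into the buffer.  The helper name example_read_ext_csd_rev() and the
 * choice of EXT_CSD_REV (from <linux/mmc/mmc.h>) as the sample field
 * are assumptions for illustration; error handling is kept minimal.
 */
static int __maybe_unused example_read_ext_csd_rev(struct mmc_card *card, u8 *rev)
{
	u8 *ext_csd;
	int err;

	ext_csd = kmalloc(512, GFP_KERNEL);	/* EXT_CSD is 512 bytes */
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_ext_csd(card, ext_csd);
	if (!err)
		*rev = ext_csd[EXT_CSD_REV];	/* revision byte */

	kfree(ext_csd);
	return err;
}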

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd;
	int err;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd;
	int err;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value)
{
	int err;
	struct mmc_command cmd;
	u32 status;

	BUG_ON(!card);
	BUG_ON(!card->host);

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* Must check status to be sure of no errors */
	do {
		err = mmc_send_status(card, &status);
		if (err)
			return err;
		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
			break;
		if (mmc_host_is_spi(card->host))
			break;
	} while (R1_CURRENT_STATE(status) == 7);	/* 7: programming state */

	if (mmc_host_is_spi(card->host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (status & 0xFDFFA000)
			printk(KERN_WARNING "%s: unexpected status %#x after "
			       "switch\n", mmc_hostname(card->host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}

	return 0;
}
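
/*
 * Illustrative only: with MMC_SWITCH_MODE_WRITE_BYTE, the CMD6 argument
 * built above simply writes "value" into EXT_CSD byte "index".  Below is
 * a sketch of a typical caller, roughly what the core does when it
 * enables high-speed timing.  The wrapper name example_enable_hs_timing()
 * is an assumption; EXT_CSD_CMD_SET_NORMAL and EXT_CSD_HS_TIMING come
 * from <linux/mmc/mmc.h>, and a real caller would also raise the host
 * clock after a successful switch.
 */
static int __maybe_unused example_enable_hs_timing(struct mmc_card *card)
{
	/* EXT_CSD[HS_TIMING] = 1 selects high-speed interface timing */
	return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			  EXT_CSD_HS_TIMING, 1);
}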

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	int err;
	struct mmc_command cmd;

	BUG_ON(!card);
	BUG_ON(!card->host);

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		printk(KERN_ERR "%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int err, width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
	return err;
}
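
/*
 * Illustrative only: a minimal sketch of how mmc_bus_test() can be used
 * to pick a working bus width, in the spirit of the core's bus-width
 * probing.  The card itself must already have been switched to each
 * candidate width (via mmc_switch() on the EXT_CSD bus-width byte)
 * before the host side is reprogrammed and tested; that step is elided
 * here.  The helper name example_pick_bus_width(), the candidate order,
 * and returning the width code on success are assumptions.
 */
static int __maybe_unused example_pick_bus_width(struct mmc_card *card)
{
	static const u8 widths[] = {
		MMC_BUS_WIDTH_8, MMC_BUS_WIDTH_4, MMC_BUS_WIDTH_1
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(widths); i++) {
		/* program the host controller for the candidate width ... */
		mmc_set_bus_width(card->host, widths[i]);

		/* ... and keep it only if the W/R test pattern survives */
		if (!mmc_bus_test(card, widths[i]))
			return widths[i];
	}

	return -EIO;
}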