/*
 * SD Association Host Standard Specification v2.0 controller emulation
 *
 * Datasheet: PartA2_SD_Host_Controller_Simplified_Specification_Ver2.00.pdf
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *  Mitsyanko Igor <i.mitsyanko@samsung.com>
 *  Peter A.G. Crosthwaite <peter.crosthwaite@petalogix.com>
 *
 * Based on MMC controller for Samsung S5PC1xx-based board emulation
 * by Alexey Merkulov and Vladimir Monakhov.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "sysemu/dma.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "hw/sd/sdhci.h"
#include "migration/vmstate.h"
#include "sdhci-internal.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "trace.h"
#include "qom/object.h"

#define TYPE_SDHCI_BUS "sdhci-bus"
/* This is reusing the SDBus typedef from SD_BUS */
DECLARE_INSTANCE_CHECKER(SDBus, SDHCI_BUS,
                         TYPE_SDHCI_BUS)

#define MASKED_WRITE(reg, mask, val) (reg = (reg & (mask)) | (val))

static inline unsigned int sdhci_get_fifolen(SDHCIState *s)
{
    return 1 << (9 + FIELD_EX32(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH));
}

/* return true on error */
static bool sdhci_check_capab_freq_range(SDHCIState *s, const char *desc,
                                         uint8_t freq, Error **errp)
{
    if (s->sd_spec_version >= 3) {
        return false;
    }
    switch (freq) {
    case 0:
    case 10 ... 63:
        break;
    default:
        error_setg(errp, "SD %s clock frequency can have value "
                   "in range 0-63 only", desc);
        return true;
    }
    return false;
}

static void sdhci_check_capareg(SDHCIState *s, Error **errp)
{
    uint64_t msk = s->capareg;
    uint32_t val;
    bool y;

    switch (s->sd_spec_version) {
    case 4:
        val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS64BIT_V4);
        trace_sdhci_capareg("64-bit system bus (v4)", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, BUS64BIT_V4, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, UHS_II);
        trace_sdhci_capareg("UHS-II", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, UHS_II, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA3);
        trace_sdhci_capareg("ADMA3", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA3, 0);

        /* fallthrough */
    case 3:
        val = FIELD_EX64(s->capareg, SDHC_CAPAB, ASYNC_INT);
        trace_sdhci_capareg("async interrupt", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, ASYNC_INT, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, SLOT_TYPE);
        if (val) {
            error_setg(errp, "slot-type not supported");
            return;
        }
        trace_sdhci_capareg("slot type", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, SLOT_TYPE, 0);

        if (val != 2) {
            val = FIELD_EX64(s->capareg, SDHC_CAPAB, EMBEDDED_8BIT);
            trace_sdhci_capareg("8-bit bus", val);
        }
        msk = FIELD_DP64(msk, SDHC_CAPAB, EMBEDDED_8BIT, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS_SPEED);
        trace_sdhci_capareg("bus speed mask", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, BUS_SPEED, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, DRIVER_STRENGTH);
        trace_sdhci_capareg("driver strength mask", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, DRIVER_STRENGTH, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, TIMER_RETUNING);
        trace_sdhci_capareg("timer re-tuning", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, TIMER_RETUNING, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, SDR50_TUNING);
        trace_sdhci_capareg("use SDR50 tuning", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, SDR50_TUNING, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, RETUNING_MODE);
        trace_sdhci_capareg("re-tuning mode", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, RETUNING_MODE, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, CLOCK_MULT);
        trace_sdhci_capareg("clock multiplier", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, CLOCK_MULT, 0);

        /* fallthrough */
    case 2: /* default version */
        val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA2);
        trace_sdhci_capareg("ADMA2", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA2, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA1);
        trace_sdhci_capareg("ADMA1", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA1, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS64BIT);
        trace_sdhci_capareg("64-bit system bus (v3)", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, BUS64BIT, 0);

        /* fallthrough */
    case 1:
        y = FIELD_EX64(s->capareg, SDHC_CAPAB, TOUNIT);
        msk = FIELD_DP64(msk, SDHC_CAPAB, TOUNIT, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, TOCLKFREQ);
        trace_sdhci_capareg(y ? "timeout (MHz)" : "timeout (KHz)", val);
        if (sdhci_check_capab_freq_range(s, "timeout", val, errp)) {
            return;
        }
        msk = FIELD_DP64(msk, SDHC_CAPAB, TOCLKFREQ, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, BASECLKFREQ);
        trace_sdhci_capareg(y ? "base (MHz)" : "base (KHz)", val);
        if (sdhci_check_capab_freq_range(s, "base", val, errp)) {
            return;
        }
        msk = FIELD_DP64(msk, SDHC_CAPAB, BASECLKFREQ, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH);
        if (val >= 3) {
            error_setg(errp, "block size can be 512, 1024 or 2048 only");
            return;
        }
        trace_sdhci_capareg("max block length", sdhci_get_fifolen(s));
        msk = FIELD_DP64(msk, SDHC_CAPAB, MAXBLOCKLENGTH, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, HIGHSPEED);
        trace_sdhci_capareg("high speed", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, HIGHSPEED, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, SDMA);
        trace_sdhci_capareg("SDMA", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, SDMA, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, SUSPRESUME);
        trace_sdhci_capareg("suspend/resume", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, SUSPRESUME, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, V33);
        trace_sdhci_capareg("3.3v", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, V33, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, V30);
        trace_sdhci_capareg("3.0v", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, V30, 0);

        val = FIELD_EX64(s->capareg, SDHC_CAPAB, V18);
        trace_sdhci_capareg("1.8v", val);
        msk = FIELD_DP64(msk, SDHC_CAPAB, V18, 0);
        break;

    default:
        error_setg(errp, "Unsupported spec version: %u", s->sd_spec_version);
    }
    if (msk) {
        qemu_log_mask(LOG_UNIMP,
                      "SDHCI: unknown CAPAB mask: 0x%016" PRIx64 "\n", msk);
    }
}

static uint8_t sdhci_slotint(SDHCIState *s)
{
    return (s->norintsts & s->norintsigen) || (s->errintsts & s->errintsigen) ||
         ((s->norintsts & SDHC_NIS_INSERT) && (s->wakcon & SDHC_WKUP_ON_INS)) ||
         ((s->norintsts & SDHC_NIS_REMOVE) && (s->wakcon & SDHC_WKUP_ON_RMV));
}

/* Return true if IRQ was pending and delivered */
static bool sdhci_update_irq(SDHCIState *s)
{
    bool pending = sdhci_slotint(s);

    qemu_set_irq(s->irq, pending);

    return pending;
}

static void sdhci_raise_insertion_irq(void *opaque)
{
    SDHCIState *s = (SDHCIState *)opaque;

    if (s->norintsts & SDHC_NIS_REMOVE) {
        timer_mod(s->insert_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY);
    } else {
        s->prnsts = 0x1ff0000;
        if (s->norintstsen & SDHC_NISEN_INSERT) {
            s->norintsts |= SDHC_NIS_INSERT;
        }
        sdhci_update_irq(s);
    }
}

static void sdhci_set_inserted(DeviceState *dev, bool level)
{
    SDHCIState *s = (SDHCIState *)dev;

    trace_sdhci_set_inserted(level ? "insert" : "eject");
    if ((s->norintsts & SDHC_NIS_REMOVE) && level) {
        /* Give target some time to notice card ejection */
        timer_mod(s->insert_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY);
    } else {
        if (level) {
            s->prnsts = 0x1ff0000;
            if (s->norintstsen & SDHC_NISEN_INSERT) {
                s->norintsts |= SDHC_NIS_INSERT;
            }
        } else {
            s->prnsts = 0x1fa0000;
            s->pwrcon &= ~SDHC_POWER_ON;
            s->clkcon &= ~SDHC_CLOCK_SDCLK_EN;
            if (s->norintstsen & SDHC_NISEN_REMOVE) {
                s->norintsts |= SDHC_NIS_REMOVE;
            }
        }
        sdhci_update_irq(s);
    }
}

static void sdhci_set_readonly(DeviceState *dev, bool level)
{
    SDHCIState *s = (SDHCIState *)dev;

    if (level) {
        s->prnsts &= ~SDHC_WRITE_PROTECT;
    } else {
        /* Write enabled */
        s->prnsts |= SDHC_WRITE_PROTECT;
    }
}

static void sdhci_reset(SDHCIState *s)
{
    DeviceState *dev = DEVICE(s);

    timer_del(s->insert_timer);
    timer_del(s->transfer_timer);

    /* Set all registers to 0. Capabilities/Version registers are not cleared
     * and assumed to always preserve their value, given to them during
     * initialization */
    memset(&s->sdmasysad, 0, (uintptr_t)&s->capareg - (uintptr_t)&s->sdmasysad);

    /* Reset other state based on current card insertion/readonly status */
    sdhci_set_inserted(dev, sdbus_get_inserted(&s->sdbus));
    sdhci_set_readonly(dev, sdbus_get_readonly(&s->sdbus));

    s->data_count = 0;
    s->stopped_state = sdhc_not_stopped;
    s->pending_insert_state = false;
}

static void sdhci_poweron_reset(DeviceState *dev)
{
    /* QOM (ie power-on) reset. This is identical to reset
     * commanded via device register apart from handling of the
     * 'pending insert on powerup' quirk.
     */
    SDHCIState *s = (SDHCIState *)dev;

    sdhci_reset(s);

    if (s->pending_insert_quirk) {
        s->pending_insert_state = true;
    }
}

static void sdhci_data_transfer(void *opaque);

#define BLOCK_SIZE_MASK (4 * KiB - 1)

static void sdhci_send_command(SDHCIState *s)
{
    SDRequest request;
    uint8_t response[16];
    int rlen;
    bool timeout = false;

    s->errintsts = 0;
    s->acmd12errsts = 0;
    request.cmd = s->cmdreg >> 8;
    request.arg = s->argument;

    trace_sdhci_send_command(request.cmd, request.arg);
    rlen = sdbus_do_command(&s->sdbus, &request, response);

    if (s->cmdreg & SDHC_CMD_RESPONSE) {
        if (rlen == 4) {
            s->rspreg[0] = ldl_be_p(response);
            s->rspreg[1] = s->rspreg[2] = s->rspreg[3] = 0;
            trace_sdhci_response4(s->rspreg[0]);
        } else if (rlen == 16) {
            s->rspreg[0] = ldl_be_p(&response[11]);
            s->rspreg[1] = ldl_be_p(&response[7]);
            s->rspreg[2] = ldl_be_p(&response[3]);
            s->rspreg[3] = (response[0] << 16) | (response[1] << 8) |
                            response[2];
            trace_sdhci_response16(s->rspreg[3], s->rspreg[2],
                                   s->rspreg[1], s->rspreg[0]);
        } else {
            timeout = true;
            trace_sdhci_error("timeout waiting for command response");
            if (s->errintstsen & SDHC_EISEN_CMDTIMEOUT) {
                s->errintsts |= SDHC_EIS_CMDTIMEOUT;
                s->norintsts |= SDHC_NIS_ERR;
            }
        }

        if (!(s->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
            (s->norintstsen & SDHC_NISEN_TRSCMP) &&
            (s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY) {
            s->norintsts |= SDHC_NIS_TRSCMP;
        }
    }

    if (s->norintstsen & SDHC_NISEN_CMDCMP) {
        s->norintsts |= SDHC_NIS_CMDCMP;
    }

    sdhci_update_irq(s);

    if (!timeout && (s->blksize & BLOCK_SIZE_MASK) &&
        (s->cmdreg & SDHC_CMD_DATA_PRESENT)) {
        s->data_count = 0;
        sdhci_data_transfer(s);
    }
}

static void sdhci_end_transfer(SDHCIState *s)
{
    /* Automatically send CMD12 to stop transfer if AutoCMD12 enabled */
    if ((s->trnmod & SDHC_TRNS_ACMD12) != 0) {
        SDRequest request;
        uint8_t response[16];

        request.cmd = 0x0C;
        request.arg = 0;
        trace_sdhci_end_transfer(request.cmd, request.arg);
        sdbus_do_command(&s->sdbus, &request, response);
        /* Auto CMD12 response goes to the upper Response register */
        s->rspreg[3] = ldl_be_p(response);
    }

    s->prnsts &= ~(SDHC_DOING_READ | SDHC_DOING_WRITE |
                   SDHC_DAT_LINE_ACTIVE | SDHC_DATA_INHIBIT |
                   SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE);

    if (s->norintstsen & SDHC_NISEN_TRSCMP) {
        s->norintsts |= SDHC_NIS_TRSCMP;
    }

    sdhci_update_irq(s);
}

/*
 * Programmed i/o data transfer
 */

/* Fill host controller's read buffer with BLKSIZE bytes of data from card */
static void sdhci_read_block_from_card(SDHCIState *s)
{
    const uint16_t blk_size = s->blksize & BLOCK_SIZE_MASK;

    if ((s->trnmod & SDHC_TRNS_MULTI) &&
        (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) {
        return;
    }

    if (!FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) {
        /* Device is not in tuning */
        sdbus_read_data(&s->sdbus, s->fifo_buffer, blk_size);
    }

    if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) {
        /* Device is in tuning */
        s->hostctl2 &= ~R_SDHC_HOSTCTL2_EXECUTE_TUNING_MASK;
        s->hostctl2 |= R_SDHC_HOSTCTL2_SAMPLING_CLKSEL_MASK;
        s->prnsts &= ~(SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ |
                       SDHC_DATA_INHIBIT);
        goto read_done;
    }

    /* New data now available for READ through Buffer Port Register */
    s->prnsts |= SDHC_DATA_AVAILABLE;
    if (s->norintstsen & SDHC_NISEN_RBUFRDY) {
        s->norintsts |= SDHC_NIS_RBUFRDY;
    }

    /* Clear DAT line active status if that was the last block */
    if ((s->trnmod & SDHC_TRNS_MULTI) == 0 ||
        ((s->trnmod & SDHC_TRNS_MULTI) && s->blkcnt == 1)) {
        s->prnsts &= ~SDHC_DAT_LINE_ACTIVE;
    }

    /* If stop at block gap request was set and it's not the last block of
     * data - generate Block Event interrupt */
    if (s->stopped_state == sdhc_gap_read && (s->trnmod & SDHC_TRNS_MULTI) &&
        s->blkcnt != 1) {
        s->prnsts &= ~SDHC_DAT_LINE_ACTIVE;
        if (s->norintstsen & SDHC_EISEN_BLKGAP) {
            s->norintsts |= SDHC_EIS_BLKGAP;
        }
    }

read_done:
    sdhci_update_irq(s);
}

/* Read @size byte of data from host controller @s BUFFER DATA PORT register */
static uint32_t sdhci_read_dataport(SDHCIState *s, unsigned size)
{
    uint32_t value = 0;
    int i;

    /* first check that a valid data exists in host controller input buffer */
    if ((s->prnsts & SDHC_DATA_AVAILABLE) == 0) {
        trace_sdhci_error("read from empty buffer");
        return 0;
    }

    for (i = 0; i < size; i++) {
        assert(s->data_count < s->buf_maxsz);
        value |= s->fifo_buffer[s->data_count] << i * 8;
        s->data_count++;
        /* check if we've read all valid data (blksize bytes) from buffer */
        if ((s->data_count) >= (s->blksize & BLOCK_SIZE_MASK)) {
            trace_sdhci_read_dataport(s->data_count);
            s->prnsts &= ~SDHC_DATA_AVAILABLE; /* no more data in a buffer */
            s->data_count = 0;  /* next buff read must start at position [0] */

            if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
                s->blkcnt--;
            }

            /* if that was the last block of data */
            if ((s->trnmod & SDHC_TRNS_MULTI) == 0 ||
                ((s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) ||
                /* stop at gap request */
                (s->stopped_state == sdhc_gap_read &&
                 !(s->prnsts & SDHC_DAT_LINE_ACTIVE))) {
                sdhci_end_transfer(s);
            } else { /* if there are more data, read next block from card */
                sdhci_read_block_from_card(s);
            }
            break;
        }
    }

    return value;
}

/* Write data from host controller FIFO to card */
static void sdhci_write_block_to_card(SDHCIState *s)
{
    if (s->prnsts & SDHC_SPACE_AVAILABLE) {
        if (s->norintstsen & SDHC_NISEN_WBUFRDY) {
            s->norintsts |= SDHC_NIS_WBUFRDY;
        }
        sdhci_update_irq(s);
        return;
    }

    if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
        if (s->blkcnt == 0) {
            return;
        } else {
            s->blkcnt--;
        }
    }

    sdbus_write_data(&s->sdbus, s->fifo_buffer, s->blksize & BLOCK_SIZE_MASK);

    /* Next data can be written through BUFFER DATA PORT register */
    s->prnsts |= SDHC_SPACE_AVAILABLE;

    /* Finish transfer if that was the last block of data */
    if ((s->trnmod & SDHC_TRNS_MULTI) == 0 ||
        ((s->trnmod & SDHC_TRNS_MULTI) &&
         (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0))) {
        sdhci_end_transfer(s);
    } else if (s->norintstsen & SDHC_NISEN_WBUFRDY) {
        s->norintsts |= SDHC_NIS_WBUFRDY;
    }

    /* Generate Block Gap Event if requested and if not the last block */
    if (s->stopped_state == sdhc_gap_write && (s->trnmod & SDHC_TRNS_MULTI) &&
        s->blkcnt > 0) {
        s->prnsts &= ~SDHC_DOING_WRITE;
        if (s->norintstsen & SDHC_EISEN_BLKGAP) {
            s->norintsts |= SDHC_EIS_BLKGAP;
        }
        sdhci_end_transfer(s);
    }

    sdhci_update_irq(s);
}
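/*
 * Illustrative sketch (not part of the device model): a guest doing PIO
 * typically drains or fills one block through the Buffer Data Port with
 * repeated 32-bit accesses, e.g. for a 512-byte block it would issue 128
 * reads or writes of the SDHC_BDATA offset:
 *
 *     for (i = 0; i < 512 / 4; i++) {
 *         buf[i] = readl(base + SDHC_BDATA);
 *     }
 *
 * "readl" and "base" above are hypothetical guest-side names; on the
 * controller side each such access lands in sdhci_read_dataport() or
 * sdhci_write_dataport(), which move a block to or from the card once a
 * whole block's worth of bytes has passed through the FIFO.
 */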
/* Write @size bytes of @value data to host controller @s Buffer Data Port
 * register */
static void sdhci_write_dataport(SDHCIState *s, uint32_t value, unsigned size)
{
    unsigned i;

    /* Check that there is free space left in a buffer */
    if (!(s->prnsts & SDHC_SPACE_AVAILABLE)) {
        trace_sdhci_error("Can't write to data buffer: buffer full");
        return;
    }

    for (i = 0; i < size; i++) {
        assert(s->data_count < s->buf_maxsz);
        s->fifo_buffer[s->data_count] = value & 0xFF;
        s->data_count++;
        value >>= 8;
        if (s->data_count >= (s->blksize & BLOCK_SIZE_MASK)) {
            trace_sdhci_write_dataport(s->data_count);
            s->data_count = 0;
            s->prnsts &= ~SDHC_SPACE_AVAILABLE;
            if (s->prnsts & SDHC_DOING_WRITE) {
                sdhci_write_block_to_card(s);
            }
        }
    }
}

/*
 * Single DMA data transfer
 */

/* Multi block SDMA transfer */
static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s)
{
    bool page_aligned = false;
    unsigned int begin;
    const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK;
    uint32_t boundary_chk = 1 << (((s->blksize & ~BLOCK_SIZE_MASK) >> 12) + 12);
    uint32_t boundary_count = boundary_chk - (s->sdmasysad % boundary_chk);

    if (!(s->trnmod & SDHC_TRNS_BLK_CNT_EN) || !s->blkcnt) {
        qemu_log_mask(LOG_UNIMP, "infinite transfer is not supported\n");
        return;
    }

    /* XXX: Some sd/mmc drivers (for example, u-boot-slp) do not account for
     * possible stop at page boundary if initial address is not page aligned,
     * allow them to work properly */
    if ((s->sdmasysad % boundary_chk) == 0) {
        page_aligned = true;
    }

    s->prnsts |= SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE;
    if (s->trnmod & SDHC_TRNS_READ) {
        s->prnsts |= SDHC_DOING_READ;
        while (s->blkcnt) {
            if (s->data_count == 0) {
                sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size);
            }
            begin = s->data_count;
            if (((boundary_count + begin) < block_size) && page_aligned) {
                s->data_count = boundary_count + begin;
                boundary_count = 0;
            } else {
                s->data_count = block_size;
                boundary_count -= block_size - begin;
                if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
                    s->blkcnt--;
                }
            }
            dma_memory_write(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin],
                             s->data_count - begin, MEMTXATTRS_UNSPECIFIED);
            s->sdmasysad += s->data_count - begin;
            if (s->data_count == block_size) {
                s->data_count = 0;
            }
            if (page_aligned && boundary_count == 0) {
                break;
            }
        }
    } else {
        s->prnsts |= SDHC_DOING_WRITE;
        while (s->blkcnt) {
            begin = s->data_count;
            if (((boundary_count + begin) < block_size) && page_aligned) {
                s->data_count = boundary_count + begin;
                boundary_count = 0;
            } else {
                s->data_count = block_size;
                boundary_count -= block_size - begin;
            }
            dma_memory_read(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin],
                            s->data_count - begin, MEMTXATTRS_UNSPECIFIED);
            s->sdmasysad += s->data_count - begin;
            if (s->data_count == block_size) {
                sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size);
                s->data_count = 0;
                if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
                    s->blkcnt--;
                }
            }
            if (page_aligned && boundary_count == 0) {
                break;
            }
        }
    }

    if (s->blkcnt == 0) {
        sdhci_end_transfer(s);
    } else {
        if (s->norintstsen & SDHC_NISEN_DMA) {
            s->norintsts |= SDHC_NIS_DMA;
        }
        sdhci_update_irq(s);
    }
}
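/*
 * Worked example for the boundary arithmetic above (illustrative only):
 * BLKSIZE bits [14:12] select the SDMA buffer boundary as
 * (4 KiB << bits[14:12]), which is what
 *
 *     boundary_chk = 1 << (((blksize & ~BLOCK_SIZE_MASK) >> 12) + 12);
 *
 * computes: 4 KiB for 0b000 up to 512 KiB for 0b111. With a 4 KiB boundary
 * and SDMASYSAD starting at 0x1000 (boundary aligned), boundary_count
 * starts at 0x1000 bytes, so after eight 512-byte blocks the loop stops,
 * the DMA interrupt is raised, and the guest is expected to reprogram the
 * SDMA System Address register to continue the transfer.
 */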
/* single block SDMA transfer */
static void sdhci_sdma_transfer_single_block(SDHCIState *s)
{
    uint32_t datacnt = s->blksize & BLOCK_SIZE_MASK;

    if (s->trnmod & SDHC_TRNS_READ) {
        sdbus_read_data(&s->sdbus, s->fifo_buffer, datacnt);
        dma_memory_write(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt,
                         MEMTXATTRS_UNSPECIFIED);
    } else {
        dma_memory_read(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt,
                        MEMTXATTRS_UNSPECIFIED);
        sdbus_write_data(&s->sdbus, s->fifo_buffer, datacnt);
    }
    s->blkcnt--;

    sdhci_end_transfer(s);
}

typedef struct ADMADescr {
    hwaddr addr;
    uint16_t length;
    uint8_t attr;
    uint8_t incr;
} ADMADescr;

static void get_adma_description(SDHCIState *s, ADMADescr *dscr)
{
    uint32_t adma1 = 0;
    uint64_t adma2 = 0;
    hwaddr entry_addr = (hwaddr)s->admasysaddr;
    switch (SDHC_DMA_TYPE(s->hostctl1)) {
    case SDHC_CTRL_ADMA2_32:
        dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2),
                        MEMTXATTRS_UNSPECIFIED);
        adma2 = le64_to_cpu(adma2);
        /* The spec does not specify endianness of descriptor table.
         * We currently assume that it is LE.
         */
        dscr->addr = (hwaddr)extract64(adma2, 32, 32) & ~0x3ull;
        dscr->length = (uint16_t)extract64(adma2, 16, 16);
        dscr->attr = (uint8_t)extract64(adma2, 0, 7);
        dscr->incr = 8;
        break;
    case SDHC_CTRL_ADMA1_32:
        dma_memory_read(s->dma_as, entry_addr, &adma1, sizeof(adma1),
                        MEMTXATTRS_UNSPECIFIED);
        adma1 = le32_to_cpu(adma1);
        dscr->addr = (hwaddr)(adma1 & 0xFFFFF000);
        dscr->attr = (uint8_t)extract32(adma1, 0, 7);
        dscr->incr = 4;
        if ((dscr->attr & SDHC_ADMA_ATTR_ACT_MASK) == SDHC_ADMA_ATTR_SET_LEN) {
            dscr->length = (uint16_t)extract32(adma1, 12, 16);
        } else {
            dscr->length = 4 * KiB;
        }
        break;
    case SDHC_CTRL_ADMA2_64:
        dma_memory_read(s->dma_as, entry_addr, &dscr->attr, 1,
                        MEMTXATTRS_UNSPECIFIED);
        dma_memory_read(s->dma_as, entry_addr + 2, &dscr->length, 2,
                        MEMTXATTRS_UNSPECIFIED);
        dscr->length = le16_to_cpu(dscr->length);
        dma_memory_read(s->dma_as, entry_addr + 4, &dscr->addr, 8,
                        MEMTXATTRS_UNSPECIFIED);
        dscr->addr = le64_to_cpu(dscr->addr);
        dscr->attr &= (uint8_t) ~0xC0;
        dscr->incr = 12;
        break;
    }
}

/* Advanced DMA data transfer */

static void sdhci_do_adma(SDHCIState *s)
{
    unsigned int begin, length;
    const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK;
    const MemTxAttrs attrs = { .memory = true };
    ADMADescr dscr = {};
    MemTxResult res = MEMTX_ERROR;
    int i;

    if (s->trnmod & SDHC_TRNS_BLK_CNT_EN && !s->blkcnt) {
        /* Stop Multiple Transfer */
        sdhci_end_transfer(s);
        return;
    }

    for (i = 0; i < SDHC_ADMA_DESCS_PER_DELAY; ++i) {
        s->admaerr &= ~SDHC_ADMAERR_LENGTH_MISMATCH;

        get_adma_description(s, &dscr);
        trace_sdhci_adma_loop(dscr.addr, dscr.length, dscr.attr);

        if ((dscr.attr & SDHC_ADMA_ATTR_VALID) == 0) {
            /* Indicate that error occurred in ST_FDS state */
            s->admaerr &= ~SDHC_ADMAERR_STATE_MASK;
            s->admaerr |= SDHC_ADMAERR_STATE_ST_FDS;

            /* Generate ADMA error interrupt */
            if (s->errintstsen & SDHC_EISEN_ADMAERR) {
                s->errintsts |= SDHC_EIS_ADMAERR;
                s->norintsts |= SDHC_NIS_ERR;
            }

            sdhci_update_irq(s);
            return;
        }

        length = dscr.length ? dscr.length : 64 * KiB;

        switch (dscr.attr & SDHC_ADMA_ATTR_ACT_MASK) {
        case SDHC_ADMA_ATTR_ACT_TRAN:  /* data transfer */
            s->prnsts |= SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE;
            if (s->trnmod & SDHC_TRNS_READ) {
                s->prnsts |= SDHC_DOING_READ;
                while (length) {
                    if (s->data_count == 0) {
                        sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size);
                    }
                    begin = s->data_count;
                    if ((length + begin) < block_size) {
                        s->data_count = length + begin;
                        length = 0;
                    } else {
                        s->data_count = block_size;
                        length -= block_size - begin;
                    }
                    res = dma_memory_write(s->dma_as, dscr.addr,
                                           &s->fifo_buffer[begin],
                                           s->data_count - begin,
                                           attrs);
                    if (res != MEMTX_OK) {
                        break;
                    }
                    dscr.addr += s->data_count - begin;
                    if (s->data_count == block_size) {
                        s->data_count = 0;
                        if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
                            s->blkcnt--;
                            if (s->blkcnt == 0) {
                                break;
                            }
                        }
                    }
                }
            } else {
                s->prnsts |= SDHC_DOING_WRITE;
                while (length) {
                    begin = s->data_count;
                    if ((length + begin) < block_size) {
                        s->data_count = length + begin;
                        length = 0;
                    } else {
                        s->data_count = block_size;
                        length -= block_size - begin;
                    }
                    res = dma_memory_read(s->dma_as, dscr.addr,
                                          &s->fifo_buffer[begin],
                                          s->data_count - begin,
                                          attrs);
                    if (res != MEMTX_OK) {
                        break;
                    }
                    dscr.addr += s->data_count - begin;
                    if (s->data_count == block_size) {
                        sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size);
                        s->data_count = 0;
                        if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
                            s->blkcnt--;
                            if (s->blkcnt == 0) {
                                break;
                            }
                        }
                    }
                }
            }
            if (res != MEMTX_OK) {
                s->data_count = 0;
                if (s->errintstsen & SDHC_EISEN_ADMAERR) {
                    trace_sdhci_error("Set ADMA error flag");
                    s->errintsts |= SDHC_EIS_ADMAERR;
                    s->norintsts |= SDHC_NIS_ERR;
                }
                sdhci_update_irq(s);
            } else {
                s->admasysaddr += dscr.incr;
            }
            break;
        case SDHC_ADMA_ATTR_ACT_LINK:   /* link to next descriptor table */
            s->admasysaddr = dscr.addr;
            trace_sdhci_adma("link", s->admasysaddr);
            break;
        default:
            s->admasysaddr += dscr.incr;
            break;
        }

        if (dscr.attr & SDHC_ADMA_ATTR_INT) {
            trace_sdhci_adma("interrupt", s->admasysaddr);
            if (s->norintstsen & SDHC_NISEN_DMA) {
                s->norintsts |= SDHC_NIS_DMA;
            }

            if (sdhci_update_irq(s) && !(dscr.attr & SDHC_ADMA_ATTR_END)) {
                /* IRQ delivered, reschedule current transfer */
                break;
            }
        }

        /* ADMA transfer terminates if blkcnt == 0 or by END attribute */
        if (((s->trnmod & SDHC_TRNS_BLK_CNT_EN) &&
            (s->blkcnt == 0)) || (dscr.attr & SDHC_ADMA_ATTR_END)) {
            trace_sdhci_adma_transfer_completed();
            if (length || ((dscr.attr & SDHC_ADMA_ATTR_END) &&
                (s->trnmod & SDHC_TRNS_BLK_CNT_EN) &&
                s->blkcnt != 0)) {
                trace_sdhci_error("SD/MMC host ADMA length mismatch");
                s->admaerr |= SDHC_ADMAERR_LENGTH_MISMATCH |
                        SDHC_ADMAERR_STATE_ST_TFR;
                if (s->errintstsen & SDHC_EISEN_ADMAERR) {
                    trace_sdhci_error("Set ADMA error flag");
                    s->errintsts |= SDHC_EIS_ADMAERR;
                    s->norintsts |= SDHC_NIS_ERR;
                }

                sdhci_update_irq(s);
            }
            sdhci_end_transfer(s);
            return;
        }

    }

    /* we have unfinished business - reschedule to continue ADMA */
    timer_mod(s->transfer_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_TRANSFER_DELAY);
}
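/*
 * Illustrative ADMA2 (32-bit) descriptor, matching the decoding in
 * get_adma_description() and assuming the little-endian table layout noted
 * there: bits [63:32] address, [31:16] length, low bits attributes
 * (valid/end/int plus the action field). A single 512-byte
 * "transfer data, end of table" entry pointing at 0x20000000 would be the
 * 64-bit word
 *
 *     0x2000000002000023ULL
 *
 * i.e. attr = valid | end | act=TRAN (0x23), length = 0x200,
 * address = 0x20000000.
 */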
/* Perform data transfer according to controller configuration */

static void sdhci_data_transfer(void *opaque)
{
    SDHCIState *s = (SDHCIState *)opaque;

    if (s->trnmod & SDHC_TRNS_DMA) {
        switch (SDHC_DMA_TYPE(s->hostctl1)) {
        case SDHC_CTRL_SDMA:
            if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) {
                sdhci_sdma_transfer_single_block(s);
            } else {
                sdhci_sdma_transfer_multi_blocks(s);
            }

            break;
        case SDHC_CTRL_ADMA1_32:
            if (!(s->capareg & R_SDHC_CAPAB_ADMA1_MASK)) {
                trace_sdhci_error("ADMA1 not supported");
                break;
            }

            sdhci_do_adma(s);
            break;
        case SDHC_CTRL_ADMA2_32:
            if (!(s->capareg & R_SDHC_CAPAB_ADMA2_MASK)) {
                trace_sdhci_error("ADMA2 not supported");
                break;
            }

            sdhci_do_adma(s);
            break;
        case SDHC_CTRL_ADMA2_64:
            if (!(s->capareg & R_SDHC_CAPAB_ADMA2_MASK) ||
                !(s->capareg & R_SDHC_CAPAB_BUS64BIT_MASK)) {
                trace_sdhci_error("64 bit ADMA not supported");
                break;
            }

            sdhci_do_adma(s);
            break;
        default:
            trace_sdhci_error("Unsupported DMA type");
            break;
        }
    } else {
        if ((s->trnmod & SDHC_TRNS_READ) && sdbus_data_ready(&s->sdbus)) {
            s->prnsts |= SDHC_DOING_READ | SDHC_DATA_INHIBIT |
                         SDHC_DAT_LINE_ACTIVE;
            sdhci_read_block_from_card(s);
        } else {
            s->prnsts |= SDHC_DOING_WRITE | SDHC_DAT_LINE_ACTIVE |
                         SDHC_SPACE_AVAILABLE | SDHC_DATA_INHIBIT;
            sdhci_write_block_to_card(s);
        }
    }
}

static bool sdhci_can_issue_command(SDHCIState *s)
{
    if (!SDHC_CLOCK_IS_ON(s->clkcon) ||
        (((s->prnsts & SDHC_DATA_INHIBIT) || s->stopped_state) &&
        ((s->cmdreg & SDHC_CMD_DATA_PRESENT) ||
        ((s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY &&
        !(SDHC_COMMAND_TYPE(s->cmdreg) == SDHC_CMD_ABORT))))) {
        return false;
    }

    return true;
}

/* The Buffer Data Port register must be accessed in sequential and
 * continuous manner */
static inline bool
sdhci_buff_access_is_sequential(SDHCIState *s, unsigned byte_num)
{
    if ((s->data_count & 0x3) != byte_num) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SDHCI: Non-sequential access to Buffer Data Port"
                      " register is prohibited\n");
        return false;
    }
    return true;
}

static void sdhci_resume_pending_transfer(SDHCIState *s)
{
    timer_del(s->transfer_timer);
    sdhci_data_transfer(s);
}

static uint64_t sdhci_read(void *opaque, hwaddr offset, unsigned size)
{
    SDHCIState *s = (SDHCIState *)opaque;
    uint32_t ret = 0;

    if (timer_pending(s->transfer_timer)) {
        sdhci_resume_pending_transfer(s);
    }

    switch (offset & ~0x3) {
    case SDHC_SYSAD:
        ret = s->sdmasysad;
        break;
    case SDHC_BLKSIZE:
        ret = s->blksize | (s->blkcnt << 16);
        break;
    case SDHC_ARGUMENT:
        ret = s->argument;
        break;
    case SDHC_TRNMOD:
        ret = s->trnmod | (s->cmdreg << 16);
        break;
    case SDHC_RSPREG0 ... SDHC_RSPREG3:
        ret = s->rspreg[((offset & ~0x3) - SDHC_RSPREG0) >> 2];
        break;
    case SDHC_BDATA:
        if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) {
            ret = sdhci_read_dataport(s, size);
            trace_sdhci_access("rd", size << 3, offset, "->", ret, ret);
            return ret;
        }
        break;
    case SDHC_PRNSTS:
        ret = s->prnsts;
        ret = FIELD_DP32(ret, SDHC_PRNSTS, DAT_LVL,
                         sdbus_get_dat_lines(&s->sdbus));
        ret = FIELD_DP32(ret, SDHC_PRNSTS, CMD_LVL,
                         sdbus_get_cmd_line(&s->sdbus));
        break;
    case SDHC_HOSTCTL:
        ret = s->hostctl1 | (s->pwrcon << 8) | (s->blkgap << 16) |
              (s->wakcon << 24);
        break;
    case SDHC_CLKCON:
        ret = s->clkcon | (s->timeoutcon << 16);
        break;
    case SDHC_NORINTSTS:
        ret = s->norintsts | (s->errintsts << 16);
        break;
    case SDHC_NORINTSTSEN:
        ret = s->norintstsen | (s->errintstsen << 16);
        break;
    case SDHC_NORINTSIGEN:
        ret = s->norintsigen | (s->errintsigen << 16);
        break;
    case SDHC_ACMD12ERRSTS:
        ret = s->acmd12errsts | (s->hostctl2 << 16);
        break;
    case SDHC_CAPAB:
        ret = (uint32_t)s->capareg;
        break;
    case SDHC_CAPAB + 4:
        ret = (uint32_t)(s->capareg >> 32);
        break;
    case SDHC_MAXCURR:
        ret = (uint32_t)s->maxcurr;
        break;
    case SDHC_MAXCURR + 4:
        ret = (uint32_t)(s->maxcurr >> 32);
        break;
    case SDHC_ADMAERR:
        ret = s->admaerr;
        break;
    case SDHC_ADMASYSADDR:
        ret = (uint32_t)s->admasysaddr;
        break;
    case SDHC_ADMASYSADDR + 4:
        ret = (uint32_t)(s->admasysaddr >> 32);
        break;
    case SDHC_SLOT_INT_STATUS:
        ret = (s->version << 16) | sdhci_slotint(s);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "SDHC rd_%ub @0x%02" HWADDR_PRIx " "
                      "not implemented\n", size, offset);
        break;
    }

    ret >>= (offset & 0x3) * 8;
    ret &= (1ULL << (size * 8)) - 1;
    trace_sdhci_access("rd", size << 3, offset, "->", ret, ret);
    return ret;
}

static inline void sdhci_blkgap_write(SDHCIState *s, uint8_t value)
{
    if ((value & SDHC_STOP_AT_GAP_REQ) && (s->blkgap & SDHC_STOP_AT_GAP_REQ)) {
        return;
    }
    s->blkgap = value & SDHC_STOP_AT_GAP_REQ;

    if ((value & SDHC_CONTINUE_REQ) && s->stopped_state &&
        (s->blkgap & SDHC_STOP_AT_GAP_REQ) == 0) {
        if (s->stopped_state == sdhc_gap_read) {
            s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ;
            sdhci_read_block_from_card(s);
        } else {
            s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_WRITE;
            sdhci_write_block_to_card(s);
        }
        s->stopped_state = sdhc_not_stopped;
    } else if (!s->stopped_state && (value & SDHC_STOP_AT_GAP_REQ)) {
        if (s->prnsts & SDHC_DOING_READ) {
            s->stopped_state = sdhc_gap_read;
        } else if (s->prnsts & SDHC_DOING_WRITE) {
            s->stopped_state = sdhc_gap_write;
        }
    }
}

static inline void sdhci_reset_write(SDHCIState *s, uint8_t value)
{
    switch (value) {
    case SDHC_RESET_ALL:
        sdhci_reset(s);
        break;
    case SDHC_RESET_CMD:
        s->prnsts &= ~SDHC_CMD_INHIBIT;
        s->norintsts &= ~SDHC_NIS_CMDCMP;
        break;
    case SDHC_RESET_DATA:
        s->data_count = 0;
        s->prnsts &= ~(SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE |
                       SDHC_DOING_READ | SDHC_DOING_WRITE |
                       SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE);
        s->blkgap &= ~(SDHC_STOP_AT_GAP_REQ | SDHC_CONTINUE_REQ);
        s->stopped_state = sdhc_not_stopped;
        s->norintsts &= ~(SDHC_NIS_WBUFRDY | SDHC_NIS_RBUFRDY |
                          SDHC_NIS_DMA | SDHC_NIS_TRSCMP | SDHC_NIS_BLKGAP);
        break;
    }
}

static void
sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
{
    SDHCIState *s = (SDHCIState *)opaque;
    unsigned shift = 8 * (offset & 0x3);
    uint32_t mask = ~(((1ULL << (size * 8)) - 1) << shift);
    uint32_t value = val;
    value <<= shift;

    if (timer_pending(s->transfer_timer)) {
        sdhci_resume_pending_transfer(s);
    }

    switch (offset & ~0x3) {
    case SDHC_SYSAD:
        if (!TRANSFERRING_DATA(s->prnsts)) {
            MASKED_WRITE(s->sdmasysad, mask, value);
            /* Writing to last byte of sdmasysad might trigger transfer */
            if (!(mask & 0xFF000000) && s->blkcnt &&
                (s->blksize & BLOCK_SIZE_MASK) &&
                SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) {
                if (s->trnmod & SDHC_TRNS_MULTI) {
                    sdhci_sdma_transfer_multi_blocks(s);
                } else {
                    sdhci_sdma_transfer_single_block(s);
                }
            }
        }
        break;
    case SDHC_BLKSIZE:
        if (!TRANSFERRING_DATA(s->prnsts)) {
            uint16_t blksize = s->blksize;

            /*
             * [14:12] SDMA Buffer Boundary
             * [11:00] Transfer Block Size
             */
            MASKED_WRITE(s->blksize, mask, extract32(value, 0, 15));
            MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16);

            /* Limit block size to the maximum buffer size */
            if (extract32(s->blksize, 0, 12) > s->buf_maxsz) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: Size 0x%x is larger than "
                              "the maximum buffer 0x%x\n", __func__, s->blksize,
                              s->buf_maxsz);

                s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz);
            }

            /*
             * If the block size is programmed to a different value from
             * the previous one, reset the data pointer of s->fifo_buffer[]
             * so that s->fifo_buffer[] can be filled in using the new block
             * size in the next transfer.
             */
            if (blksize != s->blksize) {
                s->data_count = 0;
            }
        }

        break;
    case SDHC_ARGUMENT:
        MASKED_WRITE(s->argument, mask, value);
        break;
    case SDHC_TRNMOD:
        /* DMA can be enabled only if it is supported as indicated by
         * capabilities register */
        if (!(s->capareg & R_SDHC_CAPAB_SDMA_MASK)) {
            value &= ~SDHC_TRNS_DMA;
        }

        /* TRNMOD writes are inhibited while Command Inhibit (DAT) is true */
        if (s->prnsts & SDHC_DATA_INHIBIT) {
            mask |= 0xffff;
        }

        MASKED_WRITE(s->trnmod, mask, value & SDHC_TRNMOD_MASK);
        MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16);

        /* Writing to the upper byte of CMDREG triggers SD command generation */
        if ((mask & 0xFF000000) || !sdhci_can_issue_command(s)) {
            break;
        }

        sdhci_send_command(s);
        break;
    case SDHC_BDATA:
        if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) {
            sdhci_write_dataport(s, value >> shift, size);
        }
        break;
    case SDHC_HOSTCTL:
        if (!(mask & 0xFF0000)) {
            sdhci_blkgap_write(s, value >> 16);
        }
        MASKED_WRITE(s->hostctl1, mask, value);
        MASKED_WRITE(s->pwrcon, mask >> 8, value >> 8);
        MASKED_WRITE(s->wakcon, mask >> 24, value >> 24);
        if (!(s->prnsts & SDHC_CARD_PRESENT) || ((s->pwrcon >> 1) & 0x7) < 5 ||
            !(s->capareg & (1 << (31 - ((s->pwrcon >> 1) & 0x7))))) {
            s->pwrcon &= ~SDHC_POWER_ON;
        }
        break;
    case SDHC_CLKCON:
        if (!(mask & 0xFF000000)) {
            sdhci_reset_write(s, value >> 24);
        }
        MASKED_WRITE(s->clkcon, mask, value);
        MASKED_WRITE(s->timeoutcon, mask >> 16, value >> 16);
        if (s->clkcon & SDHC_CLOCK_INT_EN) {
            s->clkcon |= SDHC_CLOCK_INT_STABLE;
        } else {
            s->clkcon &= ~SDHC_CLOCK_INT_STABLE;
        }
        break;
    case SDHC_NORINTSTS:
        if (s->norintstsen & SDHC_NISEN_CARDINT) {
            value &= ~SDHC_NIS_CARDINT;
        }
        s->norintsts &= mask | ~value;
        s->errintsts &= (mask >> 16) | ~(value >> 16);
        if (s->errintsts) {
            s->norintsts |= SDHC_NIS_ERR;
        } else {
            s->norintsts &= ~SDHC_NIS_ERR;
        }
        sdhci_update_irq(s);
        break;
    case SDHC_NORINTSTSEN:
        MASKED_WRITE(s->norintstsen, mask, value);
        MASKED_WRITE(s->errintstsen, mask >> 16, value >> 16);
        s->norintsts &= s->norintstsen;
        s->errintsts &= s->errintstsen;
        if (s->errintsts) {
            s->norintsts |= SDHC_NIS_ERR;
        } else {
            s->norintsts &= ~SDHC_NIS_ERR;
        }
        /* Quirk for Raspberry Pi: pending card insert interrupt
         * appears when first enabled after power on */
        if ((s->norintstsen & SDHC_NISEN_INSERT) && s->pending_insert_state) {
            assert(s->pending_insert_quirk);
            s->norintsts |= SDHC_NIS_INSERT;
            s->pending_insert_state = false;
        }
        sdhci_update_irq(s);
        break;
    case SDHC_NORINTSIGEN:
        MASKED_WRITE(s->norintsigen, mask, value);
        MASKED_WRITE(s->errintsigen, mask >> 16, value >> 16);
        sdhci_update_irq(s);
        break;
    case SDHC_ADMAERR:
        MASKED_WRITE(s->admaerr, mask, value);
        break;
    case SDHC_ADMASYSADDR:
        s->admasysaddr = (s->admasysaddr & (0xFFFFFFFF00000000ULL |
                (uint64_t)mask)) | (uint64_t)value;
        break;
    case SDHC_ADMASYSADDR + 4:
        s->admasysaddr = (s->admasysaddr & (0x00000000FFFFFFFFULL |
                ((uint64_t)mask << 32))) | ((uint64_t)value << 32);
        break;
    case SDHC_FEAER:
        s->acmd12errsts |= value;
        s->errintsts |= (value >> 16) & s->errintstsen;
        if (s->acmd12errsts) {
            s->errintsts |= SDHC_EIS_CMD12ERR;
        }
        if (s->errintsts) {
            s->norintsts |= SDHC_NIS_ERR;
        }
        sdhci_update_irq(s);
        break;
    case SDHC_ACMD12ERRSTS:
        MASKED_WRITE(s->acmd12errsts, mask, value & UINT16_MAX);
        if (s->uhs_mode >= UHS_I) {
            MASKED_WRITE(s->hostctl2, mask >> 16, value >> 16);

            if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, V18_ENA)) {
                sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_1_8V);
            } else {
                sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_3_3V);
            }
        }
        break;

    case SDHC_CAPAB:
    case SDHC_CAPAB + 4:
    case SDHC_MAXCURR:
    case SDHC_MAXCURR + 4:
        qemu_log_mask(LOG_GUEST_ERROR, "SDHC wr_%ub @0x%02" HWADDR_PRIx
                      " <- 0x%08x read-only\n", size, offset, value >> shift);
        break;

    default:
        qemu_log_mask(LOG_UNIMP, "SDHC wr_%ub @0x%02" HWADDR_PRIx " <- 0x%08x "
                      "not implemented\n", size, offset, value >> shift);
        break;
    }
    trace_sdhci_access("wr", size << 3, offset, "<-",
                       value >> shift, value >> shift);
}

static const MemoryRegionOps sdhci_mmio_le_ops = {
    .read = sdhci_read,
    .write = sdhci_write,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
        .unaligned = false
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static const MemoryRegionOps sdhci_mmio_be_ops = {
    .read = sdhci_read,
    .write = sdhci_write,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
        .unaligned = false
    },
    .endianness = DEVICE_BIG_ENDIAN,
};

static void sdhci_init_readonly_registers(SDHCIState *s, Error **errp)
{
    ERRP_GUARD();

    switch (s->sd_spec_version) {
    case 2 ... 3:
        break;
    default:
        error_setg(errp, "Only Spec v2/v3 are supported");
        return;
    }
    s->version = (SDHC_HCVER_VENDOR << 8) | (s->sd_spec_version - 1);

    sdhci_check_capareg(s, errp);
    if (*errp) {
        return;
    }
}

/* --- qdev common --- */

void sdhci_initfn(SDHCIState *s)
{
    qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SDHCI_BUS, DEVICE(s), "sd-bus");

    s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s);
    s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_data_transfer, s);

    s->io_ops = &sdhci_mmio_le_ops;
}

void sdhci_uninitfn(SDHCIState *s)
{
    timer_free(s->insert_timer);
    timer_free(s->transfer_timer);

    g_free(s->fifo_buffer);
    s->fifo_buffer = NULL;
}

void sdhci_common_realize(SDHCIState *s, Error **errp)
{
    ERRP_GUARD();

    switch (s->endianness) {
    case DEVICE_LITTLE_ENDIAN:
        /* s->io_ops is little endian by default */
        break;
    case DEVICE_BIG_ENDIAN:
        if (s->io_ops != &sdhci_mmio_le_ops) {
            error_setg(errp, "SD controller doesn't support big endianness");
            return;
        }
        s->io_ops = &sdhci_mmio_be_ops;
        break;
    default:
        error_setg(errp, "Incorrect endianness");
        return;
    }

    sdhci_init_readonly_registers(s, errp);
    if (*errp) {
        return;
    }

    s->buf_maxsz = sdhci_get_fifolen(s);
    s->fifo_buffer = g_malloc0(s->buf_maxsz);

    memory_region_init_io(&s->iomem, OBJECT(s), s->io_ops, s, "sdhci",
                          SDHC_REGISTERS_MAP_SIZE);
}

void sdhci_common_unrealize(SDHCIState *s)
{
    /* This function is expected to be called only once for each class:
     * - SysBus: via DeviceClass->unrealize(),
     * - PCI: via PCIDeviceClass->exit().
     * However to avoid double-free and/or use-after-free we still nullify
     * this variable (better safe than sorry!).
     */
    g_free(s->fifo_buffer);
    s->fifo_buffer = NULL;
}

static bool sdhci_pending_insert_vmstate_needed(void *opaque)
{
    SDHCIState *s = opaque;

    return s->pending_insert_state;
}

static const VMStateDescription sdhci_pending_insert_vmstate = {
    .name = "sdhci/pending-insert",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sdhci_pending_insert_vmstate_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_BOOL(pending_insert_state, SDHCIState),
        VMSTATE_END_OF_LIST()
    },
};

const VMStateDescription sdhci_vmstate = {
    .name = "sdhci",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(sdmasysad, SDHCIState),
        VMSTATE_UINT16(blksize, SDHCIState),
        VMSTATE_UINT16(blkcnt, SDHCIState),
        VMSTATE_UINT32(argument, SDHCIState),
        VMSTATE_UINT16(trnmod, SDHCIState),
        VMSTATE_UINT16(cmdreg, SDHCIState),
        VMSTATE_UINT32_ARRAY(rspreg, SDHCIState, 4),
        VMSTATE_UINT32(prnsts, SDHCIState),
        VMSTATE_UINT8(hostctl1, SDHCIState),
        VMSTATE_UINT8(pwrcon, SDHCIState),
        VMSTATE_UINT8(blkgap, SDHCIState),
        VMSTATE_UINT8(wakcon, SDHCIState),
        VMSTATE_UINT16(clkcon, SDHCIState),
        VMSTATE_UINT8(timeoutcon, SDHCIState),
        VMSTATE_UINT8(admaerr, SDHCIState),
        VMSTATE_UINT16(norintsts, SDHCIState),
        VMSTATE_UINT16(errintsts, SDHCIState),
        VMSTATE_UINT16(norintstsen, SDHCIState),
        VMSTATE_UINT16(errintstsen, SDHCIState),
        VMSTATE_UINT16(norintsigen, SDHCIState),
        VMSTATE_UINT16(errintsigen, SDHCIState),
        VMSTATE_UINT16(acmd12errsts, SDHCIState),
        VMSTATE_UINT16(data_count, SDHCIState),
        VMSTATE_UINT64(admasysaddr, SDHCIState),
        VMSTATE_UINT8(stopped_state, SDHCIState),
        VMSTATE_VBUFFER_UINT32(fifo_buffer, SDHCIState, 1, NULL, buf_maxsz),
        VMSTATE_TIMER_PTR(insert_timer, SDHCIState),
        VMSTATE_TIMER_PTR(transfer_timer, SDHCIState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &sdhci_pending_insert_vmstate,
        NULL
    },
};

void sdhci_common_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->vmsd = &sdhci_vmstate;
    device_class_set_legacy_reset(dc, sdhci_poweron_reset);
}

/* --- qdev SysBus --- */

static Property sdhci_sysbus_properties[] = {
    DEFINE_SDHCI_COMMON_PROPERTIES(SDHCIState),
    DEFINE_PROP_BOOL("pending-insert-quirk", SDHCIState, pending_insert_quirk,
                     false),
    DEFINE_PROP_LINK("dma", SDHCIState,
                     dma_mr, TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};

static void sdhci_sysbus_init(Object *obj)
{
    SDHCIState *s = SYSBUS_SDHCI(obj);

    sdhci_initfn(s);
}

static void sdhci_sysbus_finalize(Object *obj)
{
    SDHCIState *s = SYSBUS_SDHCI(obj);

    if (s->dma_mr) {
        object_unparent(OBJECT(s->dma_mr));
    }

    sdhci_uninitfn(s);
}

static void sdhci_sysbus_realize(DeviceState *dev, Error **errp)
{
    ERRP_GUARD();
    SDHCIState *s = SYSBUS_SDHCI(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    sdhci_common_realize(s, errp);
    if (*errp) {
        return;
    }

    if (s->dma_mr) {
        s->dma_as = &s->sysbus_dma_as;
        address_space_init(s->dma_as, s->dma_mr, "sdhci-dma");
    } else {
        /* use system_memory() if property "dma" not set */
        s->dma_as = &address_space_memory;
    }

    sysbus_init_irq(sbd, &s->irq);

    sysbus_init_mmio(sbd, &s->iomem);
}

static void sdhci_sysbus_unrealize(DeviceState *dev)
{
    SDHCIState *s = SYSBUS_SDHCI(dev);

    sdhci_common_unrealize(s);

    if (s->dma_mr) {
        address_space_destroy(s->dma_as);
    }
}

static void sdhci_sysbus_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, sdhci_sysbus_properties);
    dc->realize = sdhci_sysbus_realize;
    dc->unrealize = sdhci_sysbus_unrealize;

    sdhci_common_class_init(klass, data);
}

static const TypeInfo sdhci_sysbus_info = {
    .name = TYPE_SYSBUS_SDHCI,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SDHCIState),
    .instance_init = sdhci_sysbus_init,
    .instance_finalize = sdhci_sysbus_finalize,
    .class_init = sdhci_sysbus_class_init,
};

/* --- qdev bus master --- */

static void sdhci_bus_class_init(ObjectClass *klass, void *data)
{
    SDBusClass *sbc = SD_BUS_CLASS(klass);

    sbc->set_inserted = sdhci_set_inserted;
    sbc->set_readonly = sdhci_set_readonly;
}

static const TypeInfo sdhci_bus_info = {
    .name = TYPE_SDHCI_BUS,
    .parent = TYPE_SD_BUS,
    .instance_size = sizeof(SDBus),
    .class_init = sdhci_bus_class_init,
};

/* --- qdev i.MX eSDHC --- */

#define USDHC_MIX_CTRL                  0x48

#define USDHC_VENDOR_SPEC               0xc0
#define USDHC_IMX_FRC_SDCLK_ON          (1 << 8)

#define USDHC_DLL_CTRL                  0x60

#define USDHC_TUNING_CTRL               0xcc
#define USDHC_TUNE_CTRL_STATUS          0x68
#define USDHC_WTMK_LVL                  0x44

/* Undocumented register used by guests working around erratum ERR004536 */
#define USDHC_UNDOCUMENTED_REG27        0x6c

#define USDHC_CTRL_4BITBUS              (0x1 << 1)
#define USDHC_CTRL_8BITBUS              (0x2 << 1)

#define USDHC_PRNSTS_SDSTB              (1 << 3)

static uint64_t usdhc_read(void *opaque, hwaddr offset, unsigned size)
{
    SDHCIState *s = SYSBUS_SDHCI(opaque);
    uint32_t ret;
    uint16_t hostctl1;

    switch (offset) {
    default:
        return sdhci_read(opaque, offset, size);

    case SDHC_HOSTCTL:
        /*
         * For a detailed explanation on the following bit
         * manipulation code see comments in a similar part of
         * usdhc_write()
         */
        hostctl1 = SDHC_DMA_TYPE(s->hostctl1) << (8 - 3);

        if (s->hostctl1 & SDHC_CTRL_8BITBUS) {
            hostctl1 |= USDHC_CTRL_8BITBUS;
        }

        if (s->hostctl1 & SDHC_CTRL_4BITBUS) {
            hostctl1 |= USDHC_CTRL_4BITBUS;
        }

        ret = hostctl1;
        ret |= (uint32_t)s->blkgap << 16;
        ret |= (uint32_t)s->wakcon << 24;

        break;

    case SDHC_PRNSTS:
        /* Add SDSTB (SD Clock Stable) bit to PRNSTS */
        ret = sdhci_read(opaque, offset, size) & ~USDHC_PRNSTS_SDSTB;
        if (s->clkcon & SDHC_CLOCK_INT_STABLE) {
            ret |= USDHC_PRNSTS_SDSTB;
        }
        break;

    case USDHC_VENDOR_SPEC:
        ret = s->vendor_spec;
        break;
    case USDHC_DLL_CTRL:
    case USDHC_TUNE_CTRL_STATUS:
    case USDHC_UNDOCUMENTED_REG27:
    case USDHC_TUNING_CTRL:
    case USDHC_MIX_CTRL:
    case USDHC_WTMK_LVL:
        ret = 0;
        break;
    }

    return ret;
}

static void
usdhc_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
{
    SDHCIState *s = SYSBUS_SDHCI(opaque);
    uint8_t hostctl1;
    uint32_t value = (uint32_t)val;

    switch (offset) {
    case USDHC_DLL_CTRL:
    case USDHC_TUNE_CTRL_STATUS:
    case USDHC_UNDOCUMENTED_REG27:
    case USDHC_TUNING_CTRL:
    case USDHC_WTMK_LVL:
        break;

    case USDHC_VENDOR_SPEC:
        s->vendor_spec = value;
        switch (s->vendor) {
        case SDHCI_VENDOR_IMX:
            if (value & USDHC_IMX_FRC_SDCLK_ON) {
                s->prnsts &= ~SDHC_IMX_CLOCK_GATE_OFF;
            } else {
                s->prnsts |= SDHC_IMX_CLOCK_GATE_OFF;
            }
            break;
        default:
            break;
        }
        break;

    case SDHC_HOSTCTL:
        /*
         * Here's what ESDHCI has at offset 0x28 (SDHC_HOSTCTL)
         *
         *       7         6     5      4      3      2        1      0
         * |-----------+--------+--------+-----------+----------+---------|
         * | Card      | Card   | Endian | DATA3     | Data     | Led     |
         * | Detect    | Detect | Mode   | as Card   | Transfer | Control |
         * | Signal    | Test   |        | Detection | Width    |         |
         * | Selection | Level  |        | Pin       |          |         |
         * |-----------+--------+--------+-----------+----------+---------|
         *
         * and 0x29
         *
         *  15      10 9    8
         * |----------+------|
         * | Reserved | DMA  |
         * |          | Sel. |
         * |          |      |
         * |----------+------|
         *
         * and here's what the SDHCI spec expects those offsets to be:
         *
         * 0x28 (Host Control Register)
         *
         *     7        6         5       4  3      2         1        0
         * |--------+--------+----------+------+--------+----------+---------|
         * | Card   | Card   | Extended | DMA  | High   | Data     | LED     |
         * | Detect | Detect | Data     | Sel. | Speed  | Transfer | Control |
         * | Signal | Test   | Transfer |      | Enable | Width    |         |
         * | Sel.   | Level  | Width    |      |        |          |         |
         * |--------+--------+----------+------+--------+----------+---------|
         *
         * and 0x29 (Power Control Register)
         *
         * |----------------------------------|
         * | Power Control Register           |
         * |                                  |
         * | Description omitted,             |
         * | since it has no analog in ESDHCI |
         * |                                  |
         * |----------------------------------|
         *
         * Since offsets 0x2A and 0x2B should be compatible between
         * both IP specs we only need to reconcile least 16-bit of the
         * word we've been given.
         */

        /*
         * First, save bits 7 6 and 0 since they are identical
         */
        hostctl1 = value & (SDHC_CTRL_LED |
                            SDHC_CTRL_CDTEST_INS |
                            SDHC_CTRL_CDTEST_EN);
        /*
         * Second, split "Data Transfer Width" from bits 2 and 1 in to
         * bits 5 and 1
         */
        if (value & USDHC_CTRL_8BITBUS) {
            hostctl1 |= SDHC_CTRL_8BITBUS;
        }

        if (value & USDHC_CTRL_4BITBUS) {
            hostctl1 |= USDHC_CTRL_4BITBUS;
        }

        /*
         * Third, move DMA select from bits 9 and 8 to bits 4 and 3
         */
        hostctl1 |= SDHC_DMA_TYPE(value >> (8 - 3));

        /*
         * Now place the corrected value into low 16-bit of the value
         * we are going to give standard SDHCI write function
         *
         * NOTE: This transformation should be the inverse of what can
         * be found in drivers/mmc/host/sdhci-esdhc-imx.c in Linux
         * kernel
         */
        value &= ~UINT16_MAX;
        value |= hostctl1;
        value |= (uint16_t)s->pwrcon << 8;

        sdhci_write(opaque, offset, value, size);
        break;

    case USDHC_MIX_CTRL:
        /*
         * So, when SD/MMC stack in Linux tries to write to "Transfer
         * Mode Register", ESDHC i.MX quirk code will translate it
         * into a write to ESDHC_MIX_CTRL, so we do the opposite in
         * order to get where we started
         *
         * Note that Auto CMD23 Enable bit is located in a wrong place
         * on i.MX, but since it is not used by QEMU we do not care.
         *
         * We don't want to call sdhci_write(.., SDHC_TRNMOD, ...)
         * here because it will result in a call to
         * sdhci_send_command(s) which we don't want.
         *
         */
        s->trnmod = value & UINT16_MAX;
        break;
    case SDHC_TRNMOD:
        /*
         * Similar to above, but this time a write to "Command
         * Register" will be translated into a 4-byte write to
         * "Transfer Mode register" where lower 16-bit of value would
         * be set to zero. So what we do is fill those bits with
         * cached value from s->trnmod and let the SDHCI
         * infrastructure handle the rest
         */
        sdhci_write(opaque, offset, val | s->trnmod, size);
        break;
    case SDHC_BLKSIZE:
        /*
         * ESDHCI does not implement "Host SDMA Buffer Boundary", and
         * Linux driver will try to zero this field out which will
         * break the rest of SDHCI emulation.
         *
         * Linux defaults to maximum possible setting (512K boundary)
         * and it seems to be the only option that i.MX IP implements,
         * so we artificially set it to that value.
         */
        val |= 0x7 << 12;
        /* FALLTHROUGH */
    default:
        sdhci_write(opaque, offset, val, size);
        break;
    }
}

static const MemoryRegionOps usdhc_mmio_ops = {
    .read = usdhc_read,
    .write = usdhc_write,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
        .unaligned = false
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void imx_usdhc_init(Object *obj)
{
    SDHCIState *s = SYSBUS_SDHCI(obj);

    s->io_ops = &usdhc_mmio_ops;
    s->quirks = SDHCI_QUIRK_NO_BUSY_IRQ;
}

static const TypeInfo imx_usdhc_info = {
    .name = TYPE_IMX_USDHC,
    .parent = TYPE_SYSBUS_SDHCI,
    .instance_init = imx_usdhc_init,
};

/* --- qdev Samsung s3c --- */

#define S3C_SDHCI_CONTROL2      0x80
#define S3C_SDHCI_CONTROL3      0x84
#define S3C_SDHCI_CONTROL4      0x8c

static uint64_t sdhci_s3c_read(void *opaque, hwaddr offset, unsigned size)
{
    uint64_t ret;

    switch (offset) {
    case S3C_SDHCI_CONTROL2:
    case S3C_SDHCI_CONTROL3:
    case S3C_SDHCI_CONTROL4:
        /* ignore */
        ret = 0;
        break;
    default:
        ret = sdhci_read(opaque, offset, size);
        break;
    }

    return ret;
}

static void sdhci_s3c_write(void *opaque, hwaddr offset, uint64_t val,
                            unsigned size)
{
    switch (offset) {
    case S3C_SDHCI_CONTROL2:
    case S3C_SDHCI_CONTROL3:
    case S3C_SDHCI_CONTROL4:
        /* ignore */
        break;
    default:
        sdhci_write(opaque, offset, val, size);
        break;
    }
}

static const MemoryRegionOps sdhci_s3c_mmio_ops = {
    .read = sdhci_s3c_read,
    .write = sdhci_s3c_write,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
        .unaligned = false
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void sdhci_s3c_init(Object *obj)
{
    SDHCIState *s = SYSBUS_SDHCI(obj);

    s->io_ops = &sdhci_s3c_mmio_ops;
}

static const TypeInfo sdhci_s3c_info = {
    .name = TYPE_S3C_SDHCI,
    .parent = TYPE_SYSBUS_SDHCI,
    .instance_init = sdhci_s3c_init,
};

static void sdhci_register_types(void)
{
    type_register_static(&sdhci_sysbus_info);
    type_register_static(&sdhci_bus_info);
    type_register_static(&imx_usdhc_info);
    type_register_static(&sdhci_s3c_info);
}

type_init(sdhci_register_types)