1 /* 2 * SD Association Host Standard Specification v2.0 controller emulation 3 * 4 * Datasheet: PartA2_SD_Host_Controller_Simplified_Specification_Ver2.00.pdf 5 * 6 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 7 * Mitsyanko Igor <i.mitsyanko@samsung.com> 8 * Peter A.G. Crosthwaite <peter.crosthwaite@petalogix.com> 9 * 10 * Based on MMC controller for Samsung S5PC1xx-based board emulation 11 * by Alexey Merkulov and Vladimir Monakhov. 12 * 13 * This program is free software; you can redistribute it and/or modify it 14 * under the terms of the GNU General Public License as published by the 15 * Free Software Foundation; either version 2 of the License, or (at your 16 * option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 21 * See the GNU General Public License for more details. 22 * 23 * You should have received a copy of the GNU General Public License along 24 * with this program; if not, see <http://www.gnu.org/licenses/>. 25 */ 26 27 #include "qemu/osdep.h" 28 #include "qemu/units.h" 29 #include "qemu/error-report.h" 30 #include "qapi/error.h" 31 #include "hw/irq.h" 32 #include "hw/qdev-properties.h" 33 #include "sysemu/dma.h" 34 #include "qemu/timer.h" 35 #include "qemu/bitops.h" 36 #include "hw/sd/sdhci.h" 37 #include "migration/vmstate.h" 38 #include "sdhci-internal.h" 39 #include "qemu/log.h" 40 #include "qemu/module.h" 41 #include "trace.h" 42 #include "qom/object.h" 43 44 #define TYPE_SDHCI_BUS "sdhci-bus" 45 /* This is reusing the SDBus typedef from SD_BUS */ 46 DECLARE_INSTANCE_CHECKER(SDBus, SDHCI_BUS, 47 TYPE_SDHCI_BUS) 48 49 #define MASKED_WRITE(reg, mask, val) (reg = (reg & (mask)) | (val)) 50 51 static inline unsigned int sdhci_get_fifolen(SDHCIState *s) 52 { 53 return 1 << (9 + FIELD_EX32(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH)); 54 } 55 56 /* return true on error */ 57 static bool sdhci_check_capab_freq_range(SDHCIState *s, const char *desc, 58 uint8_t freq, Error **errp) 59 { 60 if (s->sd_spec_version >= 3) { 61 return false; 62 } 63 switch (freq) { 64 case 0: 65 case 10 ... 63: 66 break; 67 default: 68 error_setg(errp, "SD %s clock frequency can have value" 69 "in range 0-63 only", desc); 70 return true; 71 } 72 return false; 73 } 74 75 static void sdhci_check_capareg(SDHCIState *s, Error **errp) 76 { 77 uint64_t msk = s->capareg; 78 uint32_t val; 79 bool y; 80 81 switch (s->sd_spec_version) { 82 case 4: 83 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS64BIT_V4); 84 trace_sdhci_capareg("64-bit system bus (v4)", val); 85 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS64BIT_V4, 0); 86 87 val = FIELD_EX64(s->capareg, SDHC_CAPAB, UHS_II); 88 trace_sdhci_capareg("UHS-II", val); 89 msk = FIELD_DP64(msk, SDHC_CAPAB, UHS_II, 0); 90 91 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA3); 92 trace_sdhci_capareg("ADMA3", val); 93 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA3, 0); 94 95 /* fallthrough */ 96 case 3: 97 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ASYNC_INT); 98 trace_sdhci_capareg("async interrupt", val); 99 msk = FIELD_DP64(msk, SDHC_CAPAB, ASYNC_INT, 0); 100 101 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SLOT_TYPE); 102 if (val) { 103 error_setg(errp, "slot-type not supported"); 104 return; 105 } 106 trace_sdhci_capareg("slot type", val); 107 msk = FIELD_DP64(msk, SDHC_CAPAB, SLOT_TYPE, 0); 108 109 if (val != 2) { 110 val = FIELD_EX64(s->capareg, SDHC_CAPAB, EMBEDDED_8BIT); 111 trace_sdhci_capareg("8-bit bus", val); 112 } 113 msk = FIELD_DP64(msk, SDHC_CAPAB, EMBEDDED_8BIT, 0); 114 115 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS_SPEED); 116 trace_sdhci_capareg("bus speed mask", val); 117 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS_SPEED, 0); 118 119 val = FIELD_EX64(s->capareg, SDHC_CAPAB, DRIVER_STRENGTH); 120 trace_sdhci_capareg("driver strength mask", val); 121 msk = FIELD_DP64(msk, SDHC_CAPAB, DRIVER_STRENGTH, 0); 122 123 val = FIELD_EX64(s->capareg, SDHC_CAPAB, TIMER_RETUNING); 124 trace_sdhci_capareg("timer re-tuning", val); 125 msk = FIELD_DP64(msk, SDHC_CAPAB, TIMER_RETUNING, 0); 126 127 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SDR50_TUNING); 128 trace_sdhci_capareg("use SDR50 tuning", val); 129 msk = FIELD_DP64(msk, SDHC_CAPAB, SDR50_TUNING, 0); 130 131 val = FIELD_EX64(s->capareg, SDHC_CAPAB, RETUNING_MODE); 132 trace_sdhci_capareg("re-tuning mode", val); 133 msk = FIELD_DP64(msk, SDHC_CAPAB, RETUNING_MODE, 0); 134 135 val = FIELD_EX64(s->capareg, SDHC_CAPAB, CLOCK_MULT); 136 trace_sdhci_capareg("clock multiplier", val); 137 msk = FIELD_DP64(msk, SDHC_CAPAB, CLOCK_MULT, 0); 138 139 /* fallthrough */ 140 case 2: /* default version */ 141 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA2); 142 trace_sdhci_capareg("ADMA2", val); 143 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA2, 0); 144 145 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA1); 146 trace_sdhci_capareg("ADMA1", val); 147 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA1, 0); 148 149 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS64BIT); 150 trace_sdhci_capareg("64-bit system bus (v3)", val); 151 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS64BIT, 0); 152 153 /* fallthrough */ 154 case 1: 155 y = FIELD_EX64(s->capareg, SDHC_CAPAB, TOUNIT); 156 msk = FIELD_DP64(msk, SDHC_CAPAB, TOUNIT, 0); 157 158 val = FIELD_EX64(s->capareg, SDHC_CAPAB, TOCLKFREQ); 159 trace_sdhci_capareg(y ? "timeout (MHz)" : "Timeout (KHz)", val); 160 if (sdhci_check_capab_freq_range(s, "timeout", val, errp)) { 161 return; 162 } 163 msk = FIELD_DP64(msk, SDHC_CAPAB, TOCLKFREQ, 0); 164 165 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BASECLKFREQ); 166 trace_sdhci_capareg(y ? "base (MHz)" : "Base (KHz)", val); 167 if (sdhci_check_capab_freq_range(s, "base", val, errp)) { 168 return; 169 } 170 msk = FIELD_DP64(msk, SDHC_CAPAB, BASECLKFREQ, 0); 171 172 val = FIELD_EX64(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH); 173 if (val >= 3) { 174 error_setg(errp, "block size can be 512, 1024 or 2048 only"); 175 return; 176 } 177 trace_sdhci_capareg("max block length", sdhci_get_fifolen(s)); 178 msk = FIELD_DP64(msk, SDHC_CAPAB, MAXBLOCKLENGTH, 0); 179 180 val = FIELD_EX64(s->capareg, SDHC_CAPAB, HIGHSPEED); 181 trace_sdhci_capareg("high speed", val); 182 msk = FIELD_DP64(msk, SDHC_CAPAB, HIGHSPEED, 0); 183 184 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SDMA); 185 trace_sdhci_capareg("SDMA", val); 186 msk = FIELD_DP64(msk, SDHC_CAPAB, SDMA, 0); 187 188 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SUSPRESUME); 189 trace_sdhci_capareg("suspend/resume", val); 190 msk = FIELD_DP64(msk, SDHC_CAPAB, SUSPRESUME, 0); 191 192 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V33); 193 trace_sdhci_capareg("3.3v", val); 194 msk = FIELD_DP64(msk, SDHC_CAPAB, V33, 0); 195 196 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V30); 197 trace_sdhci_capareg("3.0v", val); 198 msk = FIELD_DP64(msk, SDHC_CAPAB, V30, 0); 199 200 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V18); 201 trace_sdhci_capareg("1.8v", val); 202 msk = FIELD_DP64(msk, SDHC_CAPAB, V18, 0); 203 break; 204 205 default: 206 error_setg(errp, "Unsupported spec version: %u", s->sd_spec_version); 207 } 208 if (msk) { 209 qemu_log_mask(LOG_UNIMP, 210 "SDHCI: unknown CAPAB mask: 0x%016" PRIx64 "\n", msk); 211 } 212 } 213 214 static uint8_t sdhci_slotint(SDHCIState *s) 215 { 216 return (s->norintsts & s->norintsigen) || (s->errintsts & s->errintsigen) || 217 ((s->norintsts & SDHC_NIS_INSERT) && (s->wakcon & SDHC_WKUP_ON_INS)) || 218 ((s->norintsts & SDHC_NIS_REMOVE) && (s->wakcon & SDHC_WKUP_ON_RMV)); 219 } 220 221 /* Return true if IRQ was pending and delivered */ 222 static bool sdhci_update_irq(SDHCIState *s) 223 { 224 bool pending = sdhci_slotint(s); 225 226 qemu_set_irq(s->irq, pending); 227 228 return pending; 229 } 230 231 static void sdhci_raise_insertion_irq(void *opaque) 232 { 233 SDHCIState *s = (SDHCIState *)opaque; 234 235 if (s->norintsts & SDHC_NIS_REMOVE) { 236 timer_mod(s->insert_timer, 237 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY); 238 } else { 239 s->prnsts = 0x1ff0000; 240 if (s->norintstsen & SDHC_NISEN_INSERT) { 241 s->norintsts |= SDHC_NIS_INSERT; 242 } 243 sdhci_update_irq(s); 244 } 245 } 246 247 static void sdhci_set_inserted(DeviceState *dev, bool level) 248 { 249 SDHCIState *s = (SDHCIState *)dev; 250 251 trace_sdhci_set_inserted(level ? "insert" : "eject"); 252 if ((s->norintsts & SDHC_NIS_REMOVE) && level) { 253 /* Give target some time to notice card ejection */ 254 timer_mod(s->insert_timer, 255 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY); 256 } else { 257 if (level) { 258 s->prnsts = 0x1ff0000; 259 if (s->norintstsen & SDHC_NISEN_INSERT) { 260 s->norintsts |= SDHC_NIS_INSERT; 261 } 262 } else { 263 s->prnsts = 0x1fa0000; 264 s->pwrcon &= ~SDHC_POWER_ON; 265 s->clkcon &= ~SDHC_CLOCK_SDCLK_EN; 266 if (s->norintstsen & SDHC_NISEN_REMOVE) { 267 s->norintsts |= SDHC_NIS_REMOVE; 268 } 269 } 270 sdhci_update_irq(s); 271 } 272 } 273 274 static void sdhci_set_readonly(DeviceState *dev, bool level) 275 { 276 SDHCIState *s = (SDHCIState *)dev; 277 278 if (level) { 279 s->prnsts &= ~SDHC_WRITE_PROTECT; 280 } else { 281 /* Write enabled */ 282 s->prnsts |= SDHC_WRITE_PROTECT; 283 } 284 } 285 286 static void sdhci_reset(SDHCIState *s) 287 { 288 DeviceState *dev = DEVICE(s); 289 290 timer_del(s->insert_timer); 291 timer_del(s->transfer_timer); 292 293 /* Set all registers to 0. Capabilities/Version registers are not cleared 294 * and assumed to always preserve their value, given to them during 295 * initialization */ 296 memset(&s->sdmasysad, 0, (uintptr_t)&s->capareg - (uintptr_t)&s->sdmasysad); 297 298 /* Reset other state based on current card insertion/readonly status */ 299 sdhci_set_inserted(dev, sdbus_get_inserted(&s->sdbus)); 300 sdhci_set_readonly(dev, sdbus_get_readonly(&s->sdbus)); 301 302 s->data_count = 0; 303 s->stopped_state = sdhc_not_stopped; 304 s->pending_insert_state = false; 305 } 306 307 static void sdhci_poweron_reset(DeviceState *dev) 308 { 309 /* QOM (ie power-on) reset. This is identical to reset 310 * commanded via device register apart from handling of the 311 * 'pending insert on powerup' quirk. 312 */ 313 SDHCIState *s = (SDHCIState *)dev; 314 315 sdhci_reset(s); 316 317 if (s->pending_insert_quirk) { 318 s->pending_insert_state = true; 319 } 320 } 321 322 static void sdhci_data_transfer(void *opaque); 323 324 static void sdhci_send_command(SDHCIState *s) 325 { 326 SDRequest request; 327 uint8_t response[16]; 328 int rlen; 329 bool timeout = false; 330 331 s->errintsts = 0; 332 s->acmd12errsts = 0; 333 request.cmd = s->cmdreg >> 8; 334 request.arg = s->argument; 335 336 trace_sdhci_send_command(request.cmd, request.arg); 337 rlen = sdbus_do_command(&s->sdbus, &request, response); 338 339 if (s->cmdreg & SDHC_CMD_RESPONSE) { 340 if (rlen == 4) { 341 s->rspreg[0] = ldl_be_p(response); 342 s->rspreg[1] = s->rspreg[2] = s->rspreg[3] = 0; 343 trace_sdhci_response4(s->rspreg[0]); 344 } else if (rlen == 16) { 345 s->rspreg[0] = ldl_be_p(&response[11]); 346 s->rspreg[1] = ldl_be_p(&response[7]); 347 s->rspreg[2] = ldl_be_p(&response[3]); 348 s->rspreg[3] = (response[0] << 16) | (response[1] << 8) | 349 response[2]; 350 trace_sdhci_response16(s->rspreg[3], s->rspreg[2], 351 s->rspreg[1], s->rspreg[0]); 352 } else { 353 timeout = true; 354 trace_sdhci_error("timeout waiting for command response"); 355 if (s->errintstsen & SDHC_EISEN_CMDTIMEOUT) { 356 s->errintsts |= SDHC_EIS_CMDTIMEOUT; 357 s->norintsts |= SDHC_NIS_ERR; 358 } 359 } 360 361 if (!(s->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) && 362 (s->norintstsen & SDHC_NISEN_TRSCMP) && 363 (s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY) { 364 s->norintsts |= SDHC_NIS_TRSCMP; 365 } 366 } 367 368 if (s->norintstsen & SDHC_NISEN_CMDCMP) { 369 s->norintsts |= SDHC_NIS_CMDCMP; 370 } 371 372 sdhci_update_irq(s); 373 374 if (!timeout && s->blksize && (s->cmdreg & SDHC_CMD_DATA_PRESENT)) { 375 s->data_count = 0; 376 sdhci_data_transfer(s); 377 } 378 } 379 380 static void sdhci_end_transfer(SDHCIState *s) 381 { 382 /* Automatically send CMD12 to stop transfer if AutoCMD12 enabled */ 383 if ((s->trnmod & SDHC_TRNS_ACMD12) != 0) { 384 SDRequest request; 385 uint8_t response[16]; 386 387 request.cmd = 0x0C; 388 request.arg = 0; 389 trace_sdhci_end_transfer(request.cmd, request.arg); 390 sdbus_do_command(&s->sdbus, &request, response); 391 /* Auto CMD12 response goes to the upper Response register */ 392 s->rspreg[3] = ldl_be_p(response); 393 } 394 395 s->prnsts &= ~(SDHC_DOING_READ | SDHC_DOING_WRITE | 396 SDHC_DAT_LINE_ACTIVE | SDHC_DATA_INHIBIT | 397 SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE); 398 399 if (s->norintstsen & SDHC_NISEN_TRSCMP) { 400 s->norintsts |= SDHC_NIS_TRSCMP; 401 } 402 403 sdhci_update_irq(s); 404 } 405 406 /* 407 * Programmed i/o data transfer 408 */ 409 #define BLOCK_SIZE_MASK (4 * KiB - 1) 410 411 /* Fill host controller's read buffer with BLKSIZE bytes of data from card */ 412 static void sdhci_read_block_from_card(SDHCIState *s) 413 { 414 const uint16_t blk_size = s->blksize & BLOCK_SIZE_MASK; 415 416 if ((s->trnmod & SDHC_TRNS_MULTI) && 417 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) { 418 return; 419 } 420 421 if (!FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) { 422 /* Device is not in tuning */ 423 sdbus_read_data(&s->sdbus, s->fifo_buffer, blk_size); 424 } 425 426 if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) { 427 /* Device is in tuning */ 428 s->hostctl2 &= ~R_SDHC_HOSTCTL2_EXECUTE_TUNING_MASK; 429 s->hostctl2 |= R_SDHC_HOSTCTL2_SAMPLING_CLKSEL_MASK; 430 s->prnsts &= ~(SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ | 431 SDHC_DATA_INHIBIT); 432 goto read_done; 433 } 434 435 /* New data now available for READ through Buffer Port Register */ 436 s->prnsts |= SDHC_DATA_AVAILABLE; 437 if (s->norintstsen & SDHC_NISEN_RBUFRDY) { 438 s->norintsts |= SDHC_NIS_RBUFRDY; 439 } 440 441 /* Clear DAT line active status if that was the last block */ 442 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 || 443 ((s->trnmod & SDHC_TRNS_MULTI) && s->blkcnt == 1)) { 444 s->prnsts &= ~SDHC_DAT_LINE_ACTIVE; 445 } 446 447 /* If stop at block gap request was set and it's not the last block of 448 * data - generate Block Event interrupt */ 449 if (s->stopped_state == sdhc_gap_read && (s->trnmod & SDHC_TRNS_MULTI) && 450 s->blkcnt != 1) { 451 s->prnsts &= ~SDHC_DAT_LINE_ACTIVE; 452 if (s->norintstsen & SDHC_EISEN_BLKGAP) { 453 s->norintsts |= SDHC_EIS_BLKGAP; 454 } 455 } 456 457 read_done: 458 sdhci_update_irq(s); 459 } 460 461 /* Read @size byte of data from host controller @s BUFFER DATA PORT register */ 462 static uint32_t sdhci_read_dataport(SDHCIState *s, unsigned size) 463 { 464 uint32_t value = 0; 465 int i; 466 467 /* first check that a valid data exists in host controller input buffer */ 468 if ((s->prnsts & SDHC_DATA_AVAILABLE) == 0) { 469 trace_sdhci_error("read from empty buffer"); 470 return 0; 471 } 472 473 for (i = 0; i < size; i++) { 474 value |= s->fifo_buffer[s->data_count] << i * 8; 475 s->data_count++; 476 /* check if we've read all valid data (blksize bytes) from buffer */ 477 if ((s->data_count) >= (s->blksize & BLOCK_SIZE_MASK)) { 478 trace_sdhci_read_dataport(s->data_count); 479 s->prnsts &= ~SDHC_DATA_AVAILABLE; /* no more data in a buffer */ 480 s->data_count = 0; /* next buff read must start at position [0] */ 481 482 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 483 s->blkcnt--; 484 } 485 486 /* if that was the last block of data */ 487 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 || 488 ((s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) || 489 /* stop at gap request */ 490 (s->stopped_state == sdhc_gap_read && 491 !(s->prnsts & SDHC_DAT_LINE_ACTIVE))) { 492 sdhci_end_transfer(s); 493 } else { /* if there are more data, read next block from card */ 494 sdhci_read_block_from_card(s); 495 } 496 break; 497 } 498 } 499 500 return value; 501 } 502 503 /* Write data from host controller FIFO to card */ 504 static void sdhci_write_block_to_card(SDHCIState *s) 505 { 506 if (s->prnsts & SDHC_SPACE_AVAILABLE) { 507 if (s->norintstsen & SDHC_NISEN_WBUFRDY) { 508 s->norintsts |= SDHC_NIS_WBUFRDY; 509 } 510 sdhci_update_irq(s); 511 return; 512 } 513 514 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 515 if (s->blkcnt == 0) { 516 return; 517 } else { 518 s->blkcnt--; 519 } 520 } 521 522 sdbus_write_data(&s->sdbus, s->fifo_buffer, s->blksize & BLOCK_SIZE_MASK); 523 524 /* Next data can be written through BUFFER DATORT register */ 525 s->prnsts |= SDHC_SPACE_AVAILABLE; 526 527 /* Finish transfer if that was the last block of data */ 528 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 || 529 ((s->trnmod & SDHC_TRNS_MULTI) && 530 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0))) { 531 sdhci_end_transfer(s); 532 } else if (s->norintstsen & SDHC_NISEN_WBUFRDY) { 533 s->norintsts |= SDHC_NIS_WBUFRDY; 534 } 535 536 /* Generate Block Gap Event if requested and if not the last block */ 537 if (s->stopped_state == sdhc_gap_write && (s->trnmod & SDHC_TRNS_MULTI) && 538 s->blkcnt > 0) { 539 s->prnsts &= ~SDHC_DOING_WRITE; 540 if (s->norintstsen & SDHC_EISEN_BLKGAP) { 541 s->norintsts |= SDHC_EIS_BLKGAP; 542 } 543 sdhci_end_transfer(s); 544 } 545 546 sdhci_update_irq(s); 547 } 548 549 /* Write @size bytes of @value data to host controller @s Buffer Data Port 550 * register */ 551 static void sdhci_write_dataport(SDHCIState *s, uint32_t value, unsigned size) 552 { 553 unsigned i; 554 555 /* Check that there is free space left in a buffer */ 556 if (!(s->prnsts & SDHC_SPACE_AVAILABLE)) { 557 trace_sdhci_error("Can't write to data buffer: buffer full"); 558 return; 559 } 560 561 for (i = 0; i < size; i++) { 562 s->fifo_buffer[s->data_count] = value & 0xFF; 563 s->data_count++; 564 value >>= 8; 565 if (s->data_count >= (s->blksize & BLOCK_SIZE_MASK)) { 566 trace_sdhci_write_dataport(s->data_count); 567 s->data_count = 0; 568 s->prnsts &= ~SDHC_SPACE_AVAILABLE; 569 if (s->prnsts & SDHC_DOING_WRITE) { 570 sdhci_write_block_to_card(s); 571 } 572 } 573 } 574 } 575 576 /* 577 * Single DMA data transfer 578 */ 579 580 /* Multi block SDMA transfer */ 581 static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s) 582 { 583 bool page_aligned = false; 584 unsigned int begin; 585 const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK; 586 uint32_t boundary_chk = 1 << (((s->blksize & ~BLOCK_SIZE_MASK) >> 12) + 12); 587 uint32_t boundary_count = boundary_chk - (s->sdmasysad % boundary_chk); 588 589 if (!(s->trnmod & SDHC_TRNS_BLK_CNT_EN) || !s->blkcnt) { 590 qemu_log_mask(LOG_UNIMP, "infinite transfer is not supported\n"); 591 return; 592 } 593 594 /* XXX: Some sd/mmc drivers (for example, u-boot-slp) do not account for 595 * possible stop at page boundary if initial address is not page aligned, 596 * allow them to work properly */ 597 if ((s->sdmasysad % boundary_chk) == 0) { 598 page_aligned = true; 599 } 600 601 s->prnsts |= SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE; 602 if (s->trnmod & SDHC_TRNS_READ) { 603 s->prnsts |= SDHC_DOING_READ; 604 while (s->blkcnt) { 605 if (s->data_count == 0) { 606 sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size); 607 } 608 begin = s->data_count; 609 if (((boundary_count + begin) < block_size) && page_aligned) { 610 s->data_count = boundary_count + begin; 611 boundary_count = 0; 612 } else { 613 s->data_count = block_size; 614 boundary_count -= block_size - begin; 615 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 616 s->blkcnt--; 617 } 618 } 619 dma_memory_write(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin], 620 s->data_count - begin, MEMTXATTRS_UNSPECIFIED); 621 s->sdmasysad += s->data_count - begin; 622 if (s->data_count == block_size) { 623 s->data_count = 0; 624 } 625 if (page_aligned && boundary_count == 0) { 626 break; 627 } 628 } 629 } else { 630 s->prnsts |= SDHC_DOING_WRITE; 631 while (s->blkcnt) { 632 begin = s->data_count; 633 if (((boundary_count + begin) < block_size) && page_aligned) { 634 s->data_count = boundary_count + begin; 635 boundary_count = 0; 636 } else { 637 s->data_count = block_size; 638 boundary_count -= block_size - begin; 639 } 640 dma_memory_read(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin], 641 s->data_count - begin, MEMTXATTRS_UNSPECIFIED); 642 s->sdmasysad += s->data_count - begin; 643 if (s->data_count == block_size) { 644 sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size); 645 s->data_count = 0; 646 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 647 s->blkcnt--; 648 } 649 } 650 if (page_aligned && boundary_count == 0) { 651 break; 652 } 653 } 654 } 655 656 if (s->blkcnt == 0) { 657 sdhci_end_transfer(s); 658 } else { 659 if (s->norintstsen & SDHC_NISEN_DMA) { 660 s->norintsts |= SDHC_NIS_DMA; 661 } 662 sdhci_update_irq(s); 663 } 664 } 665 666 /* single block SDMA transfer */ 667 static void sdhci_sdma_transfer_single_block(SDHCIState *s) 668 { 669 uint32_t datacnt = s->blksize & BLOCK_SIZE_MASK; 670 671 if (s->trnmod & SDHC_TRNS_READ) { 672 sdbus_read_data(&s->sdbus, s->fifo_buffer, datacnt); 673 dma_memory_write(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt, 674 MEMTXATTRS_UNSPECIFIED); 675 } else { 676 dma_memory_read(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt, 677 MEMTXATTRS_UNSPECIFIED); 678 sdbus_write_data(&s->sdbus, s->fifo_buffer, datacnt); 679 } 680 s->blkcnt--; 681 682 sdhci_end_transfer(s); 683 } 684 685 typedef struct ADMADescr { 686 hwaddr addr; 687 uint16_t length; 688 uint8_t attr; 689 uint8_t incr; 690 } ADMADescr; 691 692 static void get_adma_description(SDHCIState *s, ADMADescr *dscr) 693 { 694 uint32_t adma1 = 0; 695 uint64_t adma2 = 0; 696 hwaddr entry_addr = (hwaddr)s->admasysaddr; 697 switch (SDHC_DMA_TYPE(s->hostctl1)) { 698 case SDHC_CTRL_ADMA2_32: 699 dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2), 700 MEMTXATTRS_UNSPECIFIED); 701 adma2 = le64_to_cpu(adma2); 702 /* The spec does not specify endianness of descriptor table. 703 * We currently assume that it is LE. 704 */ 705 dscr->addr = (hwaddr)extract64(adma2, 32, 32) & ~0x3ull; 706 dscr->length = (uint16_t)extract64(adma2, 16, 16); 707 dscr->attr = (uint8_t)extract64(adma2, 0, 7); 708 dscr->incr = 8; 709 break; 710 case SDHC_CTRL_ADMA1_32: 711 dma_memory_read(s->dma_as, entry_addr, &adma1, sizeof(adma1), 712 MEMTXATTRS_UNSPECIFIED); 713 adma1 = le32_to_cpu(adma1); 714 dscr->addr = (hwaddr)(adma1 & 0xFFFFF000); 715 dscr->attr = (uint8_t)extract32(adma1, 0, 7); 716 dscr->incr = 4; 717 if ((dscr->attr & SDHC_ADMA_ATTR_ACT_MASK) == SDHC_ADMA_ATTR_SET_LEN) { 718 dscr->length = (uint16_t)extract32(adma1, 12, 16); 719 } else { 720 dscr->length = 4 * KiB; 721 } 722 break; 723 case SDHC_CTRL_ADMA2_64: 724 dma_memory_read(s->dma_as, entry_addr, &dscr->attr, 1, 725 MEMTXATTRS_UNSPECIFIED); 726 dma_memory_read(s->dma_as, entry_addr + 2, &dscr->length, 2, 727 MEMTXATTRS_UNSPECIFIED); 728 dscr->length = le16_to_cpu(dscr->length); 729 dma_memory_read(s->dma_as, entry_addr + 4, &dscr->addr, 8, 730 MEMTXATTRS_UNSPECIFIED); 731 dscr->addr = le64_to_cpu(dscr->addr); 732 dscr->attr &= (uint8_t) ~0xC0; 733 dscr->incr = 12; 734 break; 735 } 736 } 737 738 /* Advanced DMA data transfer */ 739 740 static void sdhci_do_adma(SDHCIState *s) 741 { 742 unsigned int begin, length; 743 const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK; 744 const MemTxAttrs attrs = { .memory = true }; 745 ADMADescr dscr = {}; 746 MemTxResult res; 747 int i; 748 749 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN && !s->blkcnt) { 750 /* Stop Multiple Transfer */ 751 sdhci_end_transfer(s); 752 return; 753 } 754 755 for (i = 0; i < SDHC_ADMA_DESCS_PER_DELAY; ++i) { 756 s->admaerr &= ~SDHC_ADMAERR_LENGTH_MISMATCH; 757 758 get_adma_description(s, &dscr); 759 trace_sdhci_adma_loop(dscr.addr, dscr.length, dscr.attr); 760 761 if ((dscr.attr & SDHC_ADMA_ATTR_VALID) == 0) { 762 /* Indicate that error occurred in ST_FDS state */ 763 s->admaerr &= ~SDHC_ADMAERR_STATE_MASK; 764 s->admaerr |= SDHC_ADMAERR_STATE_ST_FDS; 765 766 /* Generate ADMA error interrupt */ 767 if (s->errintstsen & SDHC_EISEN_ADMAERR) { 768 s->errintsts |= SDHC_EIS_ADMAERR; 769 s->norintsts |= SDHC_NIS_ERR; 770 } 771 772 sdhci_update_irq(s); 773 return; 774 } 775 776 length = dscr.length ? dscr.length : 64 * KiB; 777 778 switch (dscr.attr & SDHC_ADMA_ATTR_ACT_MASK) { 779 case SDHC_ADMA_ATTR_ACT_TRAN: /* data transfer */ 780 s->prnsts |= SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE; 781 if (s->trnmod & SDHC_TRNS_READ) { 782 s->prnsts |= SDHC_DOING_READ; 783 while (length) { 784 if (s->data_count == 0) { 785 sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size); 786 } 787 begin = s->data_count; 788 if ((length + begin) < block_size) { 789 s->data_count = length + begin; 790 length = 0; 791 } else { 792 s->data_count = block_size; 793 length -= block_size - begin; 794 } 795 res = dma_memory_write(s->dma_as, dscr.addr, 796 &s->fifo_buffer[begin], 797 s->data_count - begin, 798 attrs); 799 if (res != MEMTX_OK) { 800 break; 801 } 802 dscr.addr += s->data_count - begin; 803 if (s->data_count == block_size) { 804 s->data_count = 0; 805 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 806 s->blkcnt--; 807 if (s->blkcnt == 0) { 808 break; 809 } 810 } 811 } 812 } 813 } else { 814 s->prnsts |= SDHC_DOING_WRITE; 815 while (length) { 816 begin = s->data_count; 817 if ((length + begin) < block_size) { 818 s->data_count = length + begin; 819 length = 0; 820 } else { 821 s->data_count = block_size; 822 length -= block_size - begin; 823 } 824 res = dma_memory_read(s->dma_as, dscr.addr, 825 &s->fifo_buffer[begin], 826 s->data_count - begin, 827 attrs); 828 if (res != MEMTX_OK) { 829 break; 830 } 831 dscr.addr += s->data_count - begin; 832 if (s->data_count == block_size) { 833 sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size); 834 s->data_count = 0; 835 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 836 s->blkcnt--; 837 if (s->blkcnt == 0) { 838 break; 839 } 840 } 841 } 842 } 843 } 844 if (res != MEMTX_OK) { 845 if (s->errintstsen & SDHC_EISEN_ADMAERR) { 846 trace_sdhci_error("Set ADMA error flag"); 847 s->errintsts |= SDHC_EIS_ADMAERR; 848 s->norintsts |= SDHC_NIS_ERR; 849 } 850 sdhci_update_irq(s); 851 } else { 852 s->admasysaddr += dscr.incr; 853 } 854 break; 855 case SDHC_ADMA_ATTR_ACT_LINK: /* link to next descriptor table */ 856 s->admasysaddr = dscr.addr; 857 trace_sdhci_adma("link", s->admasysaddr); 858 break; 859 default: 860 s->admasysaddr += dscr.incr; 861 break; 862 } 863 864 if (dscr.attr & SDHC_ADMA_ATTR_INT) { 865 trace_sdhci_adma("interrupt", s->admasysaddr); 866 if (s->norintstsen & SDHC_NISEN_DMA) { 867 s->norintsts |= SDHC_NIS_DMA; 868 } 869 870 if (sdhci_update_irq(s) && !(dscr.attr & SDHC_ADMA_ATTR_END)) { 871 /* IRQ delivered, reschedule current transfer */ 872 break; 873 } 874 } 875 876 /* ADMA transfer terminates if blkcnt == 0 or by END attribute */ 877 if (((s->trnmod & SDHC_TRNS_BLK_CNT_EN) && 878 (s->blkcnt == 0)) || (dscr.attr & SDHC_ADMA_ATTR_END)) { 879 trace_sdhci_adma_transfer_completed(); 880 if (length || ((dscr.attr & SDHC_ADMA_ATTR_END) && 881 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && 882 s->blkcnt != 0)) { 883 trace_sdhci_error("SD/MMC host ADMA length mismatch"); 884 s->admaerr |= SDHC_ADMAERR_LENGTH_MISMATCH | 885 SDHC_ADMAERR_STATE_ST_TFR; 886 if (s->errintstsen & SDHC_EISEN_ADMAERR) { 887 trace_sdhci_error("Set ADMA error flag"); 888 s->errintsts |= SDHC_EIS_ADMAERR; 889 s->norintsts |= SDHC_NIS_ERR; 890 } 891 892 sdhci_update_irq(s); 893 } 894 sdhci_end_transfer(s); 895 return; 896 } 897 898 } 899 900 /* we have unfinished business - reschedule to continue ADMA */ 901 timer_mod(s->transfer_timer, 902 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_TRANSFER_DELAY); 903 } 904 905 /* Perform data transfer according to controller configuration */ 906 907 static void sdhci_data_transfer(void *opaque) 908 { 909 SDHCIState *s = (SDHCIState *)opaque; 910 911 if (s->trnmod & SDHC_TRNS_DMA) { 912 switch (SDHC_DMA_TYPE(s->hostctl1)) { 913 case SDHC_CTRL_SDMA: 914 if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) { 915 sdhci_sdma_transfer_single_block(s); 916 } else { 917 sdhci_sdma_transfer_multi_blocks(s); 918 } 919 920 break; 921 case SDHC_CTRL_ADMA1_32: 922 if (!(s->capareg & R_SDHC_CAPAB_ADMA1_MASK)) { 923 trace_sdhci_error("ADMA1 not supported"); 924 break; 925 } 926 927 sdhci_do_adma(s); 928 break; 929 case SDHC_CTRL_ADMA2_32: 930 if (!(s->capareg & R_SDHC_CAPAB_ADMA2_MASK)) { 931 trace_sdhci_error("ADMA2 not supported"); 932 break; 933 } 934 935 sdhci_do_adma(s); 936 break; 937 case SDHC_CTRL_ADMA2_64: 938 if (!(s->capareg & R_SDHC_CAPAB_ADMA2_MASK) || 939 !(s->capareg & R_SDHC_CAPAB_BUS64BIT_MASK)) { 940 trace_sdhci_error("64 bit ADMA not supported"); 941 break; 942 } 943 944 sdhci_do_adma(s); 945 break; 946 default: 947 trace_sdhci_error("Unsupported DMA type"); 948 break; 949 } 950 } else { 951 if ((s->trnmod & SDHC_TRNS_READ) && sdbus_data_ready(&s->sdbus)) { 952 s->prnsts |= SDHC_DOING_READ | SDHC_DATA_INHIBIT | 953 SDHC_DAT_LINE_ACTIVE; 954 sdhci_read_block_from_card(s); 955 } else { 956 s->prnsts |= SDHC_DOING_WRITE | SDHC_DAT_LINE_ACTIVE | 957 SDHC_SPACE_AVAILABLE | SDHC_DATA_INHIBIT; 958 sdhci_write_block_to_card(s); 959 } 960 } 961 } 962 963 static bool sdhci_can_issue_command(SDHCIState *s) 964 { 965 if (!SDHC_CLOCK_IS_ON(s->clkcon) || 966 (((s->prnsts & SDHC_DATA_INHIBIT) || s->stopped_state) && 967 ((s->cmdreg & SDHC_CMD_DATA_PRESENT) || 968 ((s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY && 969 !(SDHC_COMMAND_TYPE(s->cmdreg) == SDHC_CMD_ABORT))))) { 970 return false; 971 } 972 973 return true; 974 } 975 976 /* The Buffer Data Port register must be accessed in sequential and 977 * continuous manner */ 978 static inline bool 979 sdhci_buff_access_is_sequential(SDHCIState *s, unsigned byte_num) 980 { 981 if ((s->data_count & 0x3) != byte_num) { 982 trace_sdhci_error("Non-sequential access to Buffer Data Port register" 983 "is prohibited\n"); 984 return false; 985 } 986 return true; 987 } 988 989 static void sdhci_resume_pending_transfer(SDHCIState *s) 990 { 991 timer_del(s->transfer_timer); 992 sdhci_data_transfer(s); 993 } 994 995 static uint64_t sdhci_read(void *opaque, hwaddr offset, unsigned size) 996 { 997 SDHCIState *s = (SDHCIState *)opaque; 998 uint32_t ret = 0; 999 1000 if (timer_pending(s->transfer_timer)) { 1001 sdhci_resume_pending_transfer(s); 1002 } 1003 1004 switch (offset & ~0x3) { 1005 case SDHC_SYSAD: 1006 ret = s->sdmasysad; 1007 break; 1008 case SDHC_BLKSIZE: 1009 ret = s->blksize | (s->blkcnt << 16); 1010 break; 1011 case SDHC_ARGUMENT: 1012 ret = s->argument; 1013 break; 1014 case SDHC_TRNMOD: 1015 ret = s->trnmod | (s->cmdreg << 16); 1016 break; 1017 case SDHC_RSPREG0 ... SDHC_RSPREG3: 1018 ret = s->rspreg[((offset & ~0x3) - SDHC_RSPREG0) >> 2]; 1019 break; 1020 case SDHC_BDATA: 1021 if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) { 1022 ret = sdhci_read_dataport(s, size); 1023 trace_sdhci_access("rd", size << 3, offset, "->", ret, ret); 1024 return ret; 1025 } 1026 break; 1027 case SDHC_PRNSTS: 1028 ret = s->prnsts; 1029 ret = FIELD_DP32(ret, SDHC_PRNSTS, DAT_LVL, 1030 sdbus_get_dat_lines(&s->sdbus)); 1031 ret = FIELD_DP32(ret, SDHC_PRNSTS, CMD_LVL, 1032 sdbus_get_cmd_line(&s->sdbus)); 1033 break; 1034 case SDHC_HOSTCTL: 1035 ret = s->hostctl1 | (s->pwrcon << 8) | (s->blkgap << 16) | 1036 (s->wakcon << 24); 1037 break; 1038 case SDHC_CLKCON: 1039 ret = s->clkcon | (s->timeoutcon << 16); 1040 break; 1041 case SDHC_NORINTSTS: 1042 ret = s->norintsts | (s->errintsts << 16); 1043 break; 1044 case SDHC_NORINTSTSEN: 1045 ret = s->norintstsen | (s->errintstsen << 16); 1046 break; 1047 case SDHC_NORINTSIGEN: 1048 ret = s->norintsigen | (s->errintsigen << 16); 1049 break; 1050 case SDHC_ACMD12ERRSTS: 1051 ret = s->acmd12errsts | (s->hostctl2 << 16); 1052 break; 1053 case SDHC_CAPAB: 1054 ret = (uint32_t)s->capareg; 1055 break; 1056 case SDHC_CAPAB + 4: 1057 ret = (uint32_t)(s->capareg >> 32); 1058 break; 1059 case SDHC_MAXCURR: 1060 ret = (uint32_t)s->maxcurr; 1061 break; 1062 case SDHC_MAXCURR + 4: 1063 ret = (uint32_t)(s->maxcurr >> 32); 1064 break; 1065 case SDHC_ADMAERR: 1066 ret = s->admaerr; 1067 break; 1068 case SDHC_ADMASYSADDR: 1069 ret = (uint32_t)s->admasysaddr; 1070 break; 1071 case SDHC_ADMASYSADDR + 4: 1072 ret = (uint32_t)(s->admasysaddr >> 32); 1073 break; 1074 case SDHC_SLOT_INT_STATUS: 1075 ret = (s->version << 16) | sdhci_slotint(s); 1076 break; 1077 default: 1078 qemu_log_mask(LOG_UNIMP, "SDHC rd_%ub @0x%02" HWADDR_PRIx " " 1079 "not implemented\n", size, offset); 1080 break; 1081 } 1082 1083 ret >>= (offset & 0x3) * 8; 1084 ret &= (1ULL << (size * 8)) - 1; 1085 trace_sdhci_access("rd", size << 3, offset, "->", ret, ret); 1086 return ret; 1087 } 1088 1089 static inline void sdhci_blkgap_write(SDHCIState *s, uint8_t value) 1090 { 1091 if ((value & SDHC_STOP_AT_GAP_REQ) && (s->blkgap & SDHC_STOP_AT_GAP_REQ)) { 1092 return; 1093 } 1094 s->blkgap = value & SDHC_STOP_AT_GAP_REQ; 1095 1096 if ((value & SDHC_CONTINUE_REQ) && s->stopped_state && 1097 (s->blkgap & SDHC_STOP_AT_GAP_REQ) == 0) { 1098 if (s->stopped_state == sdhc_gap_read) { 1099 s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ; 1100 sdhci_read_block_from_card(s); 1101 } else { 1102 s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_WRITE; 1103 sdhci_write_block_to_card(s); 1104 } 1105 s->stopped_state = sdhc_not_stopped; 1106 } else if (!s->stopped_state && (value & SDHC_STOP_AT_GAP_REQ)) { 1107 if (s->prnsts & SDHC_DOING_READ) { 1108 s->stopped_state = sdhc_gap_read; 1109 } else if (s->prnsts & SDHC_DOING_WRITE) { 1110 s->stopped_state = sdhc_gap_write; 1111 } 1112 } 1113 } 1114 1115 static inline void sdhci_reset_write(SDHCIState *s, uint8_t value) 1116 { 1117 switch (value) { 1118 case SDHC_RESET_ALL: 1119 sdhci_reset(s); 1120 break; 1121 case SDHC_RESET_CMD: 1122 s->prnsts &= ~SDHC_CMD_INHIBIT; 1123 s->norintsts &= ~SDHC_NIS_CMDCMP; 1124 break; 1125 case SDHC_RESET_DATA: 1126 s->data_count = 0; 1127 s->prnsts &= ~(SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE | 1128 SDHC_DOING_READ | SDHC_DOING_WRITE | 1129 SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE); 1130 s->blkgap &= ~(SDHC_STOP_AT_GAP_REQ | SDHC_CONTINUE_REQ); 1131 s->stopped_state = sdhc_not_stopped; 1132 s->norintsts &= ~(SDHC_NIS_WBUFRDY | SDHC_NIS_RBUFRDY | 1133 SDHC_NIS_DMA | SDHC_NIS_TRSCMP | SDHC_NIS_BLKGAP); 1134 break; 1135 } 1136 } 1137 1138 static void 1139 sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) 1140 { 1141 SDHCIState *s = (SDHCIState *)opaque; 1142 unsigned shift = 8 * (offset & 0x3); 1143 uint32_t mask = ~(((1ULL << (size * 8)) - 1) << shift); 1144 uint32_t value = val; 1145 value <<= shift; 1146 1147 if (timer_pending(s->transfer_timer)) { 1148 sdhci_resume_pending_transfer(s); 1149 } 1150 1151 switch (offset & ~0x3) { 1152 case SDHC_SYSAD: 1153 if (!TRANSFERRING_DATA(s->prnsts)) { 1154 s->sdmasysad = (s->sdmasysad & mask) | value; 1155 MASKED_WRITE(s->sdmasysad, mask, value); 1156 /* Writing to last byte of sdmasysad might trigger transfer */ 1157 if (!(mask & 0xFF000000) && s->blkcnt && s->blksize && 1158 SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) { 1159 if (s->trnmod & SDHC_TRNS_MULTI) { 1160 sdhci_sdma_transfer_multi_blocks(s); 1161 } else { 1162 sdhci_sdma_transfer_single_block(s); 1163 } 1164 } 1165 } 1166 break; 1167 case SDHC_BLKSIZE: 1168 if (!TRANSFERRING_DATA(s->prnsts)) { 1169 uint16_t blksize = s->blksize; 1170 1171 MASKED_WRITE(s->blksize, mask, extract32(value, 0, 12)); 1172 MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16); 1173 1174 /* Limit block size to the maximum buffer size */ 1175 if (extract32(s->blksize, 0, 12) > s->buf_maxsz) { 1176 qemu_log_mask(LOG_GUEST_ERROR, "%s: Size 0x%x is larger than " 1177 "the maximum buffer 0x%x\n", __func__, s->blksize, 1178 s->buf_maxsz); 1179 1180 s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz); 1181 } 1182 1183 /* 1184 * If the block size is programmed to a different value from 1185 * the previous one, reset the data pointer of s->fifo_buffer[] 1186 * so that s->fifo_buffer[] can be filled in using the new block 1187 * size in the next transfer. 1188 */ 1189 if (blksize != s->blksize) { 1190 s->data_count = 0; 1191 } 1192 } 1193 1194 break; 1195 case SDHC_ARGUMENT: 1196 MASKED_WRITE(s->argument, mask, value); 1197 break; 1198 case SDHC_TRNMOD: 1199 /* DMA can be enabled only if it is supported as indicated by 1200 * capabilities register */ 1201 if (!(s->capareg & R_SDHC_CAPAB_SDMA_MASK)) { 1202 value &= ~SDHC_TRNS_DMA; 1203 } 1204 MASKED_WRITE(s->trnmod, mask, value & SDHC_TRNMOD_MASK); 1205 MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16); 1206 1207 /* Writing to the upper byte of CMDREG triggers SD command generation */ 1208 if ((mask & 0xFF000000) || !sdhci_can_issue_command(s)) { 1209 break; 1210 } 1211 1212 sdhci_send_command(s); 1213 break; 1214 case SDHC_BDATA: 1215 if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) { 1216 sdhci_write_dataport(s, value >> shift, size); 1217 } 1218 break; 1219 case SDHC_HOSTCTL: 1220 if (!(mask & 0xFF0000)) { 1221 sdhci_blkgap_write(s, value >> 16); 1222 } 1223 MASKED_WRITE(s->hostctl1, mask, value); 1224 MASKED_WRITE(s->pwrcon, mask >> 8, value >> 8); 1225 MASKED_WRITE(s->wakcon, mask >> 24, value >> 24); 1226 if (!(s->prnsts & SDHC_CARD_PRESENT) || ((s->pwrcon >> 1) & 0x7) < 5 || 1227 !(s->capareg & (1 << (31 - ((s->pwrcon >> 1) & 0x7))))) { 1228 s->pwrcon &= ~SDHC_POWER_ON; 1229 } 1230 break; 1231 case SDHC_CLKCON: 1232 if (!(mask & 0xFF000000)) { 1233 sdhci_reset_write(s, value >> 24); 1234 } 1235 MASKED_WRITE(s->clkcon, mask, value); 1236 MASKED_WRITE(s->timeoutcon, mask >> 16, value >> 16); 1237 if (s->clkcon & SDHC_CLOCK_INT_EN) { 1238 s->clkcon |= SDHC_CLOCK_INT_STABLE; 1239 } else { 1240 s->clkcon &= ~SDHC_CLOCK_INT_STABLE; 1241 } 1242 break; 1243 case SDHC_NORINTSTS: 1244 if (s->norintstsen & SDHC_NISEN_CARDINT) { 1245 value &= ~SDHC_NIS_CARDINT; 1246 } 1247 s->norintsts &= mask | ~value; 1248 s->errintsts &= (mask >> 16) | ~(value >> 16); 1249 if (s->errintsts) { 1250 s->norintsts |= SDHC_NIS_ERR; 1251 } else { 1252 s->norintsts &= ~SDHC_NIS_ERR; 1253 } 1254 sdhci_update_irq(s); 1255 break; 1256 case SDHC_NORINTSTSEN: 1257 MASKED_WRITE(s->norintstsen, mask, value); 1258 MASKED_WRITE(s->errintstsen, mask >> 16, value >> 16); 1259 s->norintsts &= s->norintstsen; 1260 s->errintsts &= s->errintstsen; 1261 if (s->errintsts) { 1262 s->norintsts |= SDHC_NIS_ERR; 1263 } else { 1264 s->norintsts &= ~SDHC_NIS_ERR; 1265 } 1266 /* Quirk for Raspberry Pi: pending card insert interrupt 1267 * appears when first enabled after power on */ 1268 if ((s->norintstsen & SDHC_NISEN_INSERT) && s->pending_insert_state) { 1269 assert(s->pending_insert_quirk); 1270 s->norintsts |= SDHC_NIS_INSERT; 1271 s->pending_insert_state = false; 1272 } 1273 sdhci_update_irq(s); 1274 break; 1275 case SDHC_NORINTSIGEN: 1276 MASKED_WRITE(s->norintsigen, mask, value); 1277 MASKED_WRITE(s->errintsigen, mask >> 16, value >> 16); 1278 sdhci_update_irq(s); 1279 break; 1280 case SDHC_ADMAERR: 1281 MASKED_WRITE(s->admaerr, mask, value); 1282 break; 1283 case SDHC_ADMASYSADDR: 1284 s->admasysaddr = (s->admasysaddr & (0xFFFFFFFF00000000ULL | 1285 (uint64_t)mask)) | (uint64_t)value; 1286 break; 1287 case SDHC_ADMASYSADDR + 4: 1288 s->admasysaddr = (s->admasysaddr & (0x00000000FFFFFFFFULL | 1289 ((uint64_t)mask << 32))) | ((uint64_t)value << 32); 1290 break; 1291 case SDHC_FEAER: 1292 s->acmd12errsts |= value; 1293 s->errintsts |= (value >> 16) & s->errintstsen; 1294 if (s->acmd12errsts) { 1295 s->errintsts |= SDHC_EIS_CMD12ERR; 1296 } 1297 if (s->errintsts) { 1298 s->norintsts |= SDHC_NIS_ERR; 1299 } 1300 sdhci_update_irq(s); 1301 break; 1302 case SDHC_ACMD12ERRSTS: 1303 MASKED_WRITE(s->acmd12errsts, mask, value & UINT16_MAX); 1304 if (s->uhs_mode >= UHS_I) { 1305 MASKED_WRITE(s->hostctl2, mask >> 16, value >> 16); 1306 1307 if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, V18_ENA)) { 1308 sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_1_8V); 1309 } else { 1310 sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_3_3V); 1311 } 1312 } 1313 break; 1314 1315 case SDHC_CAPAB: 1316 case SDHC_CAPAB + 4: 1317 case SDHC_MAXCURR: 1318 case SDHC_MAXCURR + 4: 1319 qemu_log_mask(LOG_GUEST_ERROR, "SDHC wr_%ub @0x%02" HWADDR_PRIx 1320 " <- 0x%08x read-only\n", size, offset, value >> shift); 1321 break; 1322 1323 default: 1324 qemu_log_mask(LOG_UNIMP, "SDHC wr_%ub @0x%02" HWADDR_PRIx " <- 0x%08x " 1325 "not implemented\n", size, offset, value >> shift); 1326 break; 1327 } 1328 trace_sdhci_access("wr", size << 3, offset, "<-", 1329 value >> shift, value >> shift); 1330 } 1331 1332 static const MemoryRegionOps sdhci_mmio_ops = { 1333 .read = sdhci_read, 1334 .write = sdhci_write, 1335 .valid = { 1336 .min_access_size = 1, 1337 .max_access_size = 4, 1338 .unaligned = false 1339 }, 1340 .endianness = DEVICE_LITTLE_ENDIAN, 1341 }; 1342 1343 static void sdhci_init_readonly_registers(SDHCIState *s, Error **errp) 1344 { 1345 ERRP_GUARD(); 1346 1347 switch (s->sd_spec_version) { 1348 case 2 ... 3: 1349 break; 1350 default: 1351 error_setg(errp, "Only Spec v2/v3 are supported"); 1352 return; 1353 } 1354 s->version = (SDHC_HCVER_VENDOR << 8) | (s->sd_spec_version - 1); 1355 1356 sdhci_check_capareg(s, errp); 1357 if (*errp) { 1358 return; 1359 } 1360 } 1361 1362 /* --- qdev common --- */ 1363 1364 void sdhci_initfn(SDHCIState *s) 1365 { 1366 qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SDHCI_BUS, DEVICE(s), "sd-bus"); 1367 1368 s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s); 1369 s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_data_transfer, s); 1370 1371 s->io_ops = &sdhci_mmio_ops; 1372 } 1373 1374 void sdhci_uninitfn(SDHCIState *s) 1375 { 1376 timer_free(s->insert_timer); 1377 timer_free(s->transfer_timer); 1378 1379 g_free(s->fifo_buffer); 1380 s->fifo_buffer = NULL; 1381 } 1382 1383 void sdhci_common_realize(SDHCIState *s, Error **errp) 1384 { 1385 ERRP_GUARD(); 1386 1387 sdhci_init_readonly_registers(s, errp); 1388 if (*errp) { 1389 return; 1390 } 1391 s->buf_maxsz = sdhci_get_fifolen(s); 1392 s->fifo_buffer = g_malloc0(s->buf_maxsz); 1393 1394 memory_region_init_io(&s->iomem, OBJECT(s), s->io_ops, s, "sdhci", 1395 SDHC_REGISTERS_MAP_SIZE); 1396 } 1397 1398 void sdhci_common_unrealize(SDHCIState *s) 1399 { 1400 /* This function is expected to be called only once for each class: 1401 * - SysBus: via DeviceClass->unrealize(), 1402 * - PCI: via PCIDeviceClass->exit(). 1403 * However to avoid double-free and/or use-after-free we still nullify 1404 * this variable (better safe than sorry!). */ 1405 g_free(s->fifo_buffer); 1406 s->fifo_buffer = NULL; 1407 } 1408 1409 static bool sdhci_pending_insert_vmstate_needed(void *opaque) 1410 { 1411 SDHCIState *s = opaque; 1412 1413 return s->pending_insert_state; 1414 } 1415 1416 static const VMStateDescription sdhci_pending_insert_vmstate = { 1417 .name = "sdhci/pending-insert", 1418 .version_id = 1, 1419 .minimum_version_id = 1, 1420 .needed = sdhci_pending_insert_vmstate_needed, 1421 .fields = (VMStateField[]) { 1422 VMSTATE_BOOL(pending_insert_state, SDHCIState), 1423 VMSTATE_END_OF_LIST() 1424 }, 1425 }; 1426 1427 const VMStateDescription sdhci_vmstate = { 1428 .name = "sdhci", 1429 .version_id = 1, 1430 .minimum_version_id = 1, 1431 .fields = (VMStateField[]) { 1432 VMSTATE_UINT32(sdmasysad, SDHCIState), 1433 VMSTATE_UINT16(blksize, SDHCIState), 1434 VMSTATE_UINT16(blkcnt, SDHCIState), 1435 VMSTATE_UINT32(argument, SDHCIState), 1436 VMSTATE_UINT16(trnmod, SDHCIState), 1437 VMSTATE_UINT16(cmdreg, SDHCIState), 1438 VMSTATE_UINT32_ARRAY(rspreg, SDHCIState, 4), 1439 VMSTATE_UINT32(prnsts, SDHCIState), 1440 VMSTATE_UINT8(hostctl1, SDHCIState), 1441 VMSTATE_UINT8(pwrcon, SDHCIState), 1442 VMSTATE_UINT8(blkgap, SDHCIState), 1443 VMSTATE_UINT8(wakcon, SDHCIState), 1444 VMSTATE_UINT16(clkcon, SDHCIState), 1445 VMSTATE_UINT8(timeoutcon, SDHCIState), 1446 VMSTATE_UINT8(admaerr, SDHCIState), 1447 VMSTATE_UINT16(norintsts, SDHCIState), 1448 VMSTATE_UINT16(errintsts, SDHCIState), 1449 VMSTATE_UINT16(norintstsen, SDHCIState), 1450 VMSTATE_UINT16(errintstsen, SDHCIState), 1451 VMSTATE_UINT16(norintsigen, SDHCIState), 1452 VMSTATE_UINT16(errintsigen, SDHCIState), 1453 VMSTATE_UINT16(acmd12errsts, SDHCIState), 1454 VMSTATE_UINT16(data_count, SDHCIState), 1455 VMSTATE_UINT64(admasysaddr, SDHCIState), 1456 VMSTATE_UINT8(stopped_state, SDHCIState), 1457 VMSTATE_VBUFFER_UINT32(fifo_buffer, SDHCIState, 1, NULL, buf_maxsz), 1458 VMSTATE_TIMER_PTR(insert_timer, SDHCIState), 1459 VMSTATE_TIMER_PTR(transfer_timer, SDHCIState), 1460 VMSTATE_END_OF_LIST() 1461 }, 1462 .subsections = (const VMStateDescription*[]) { 1463 &sdhci_pending_insert_vmstate, 1464 NULL 1465 }, 1466 }; 1467 1468 void sdhci_common_class_init(ObjectClass *klass, void *data) 1469 { 1470 DeviceClass *dc = DEVICE_CLASS(klass); 1471 1472 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); 1473 dc->vmsd = &sdhci_vmstate; 1474 dc->reset = sdhci_poweron_reset; 1475 } 1476 1477 /* --- qdev SysBus --- */ 1478 1479 static Property sdhci_sysbus_properties[] = { 1480 DEFINE_SDHCI_COMMON_PROPERTIES(SDHCIState), 1481 DEFINE_PROP_BOOL("pending-insert-quirk", SDHCIState, pending_insert_quirk, 1482 false), 1483 DEFINE_PROP_LINK("dma", SDHCIState, 1484 dma_mr, TYPE_MEMORY_REGION, MemoryRegion *), 1485 DEFINE_PROP_END_OF_LIST(), 1486 }; 1487 1488 static void sdhci_sysbus_init(Object *obj) 1489 { 1490 SDHCIState *s = SYSBUS_SDHCI(obj); 1491 1492 sdhci_initfn(s); 1493 } 1494 1495 static void sdhci_sysbus_finalize(Object *obj) 1496 { 1497 SDHCIState *s = SYSBUS_SDHCI(obj); 1498 1499 if (s->dma_mr) { 1500 object_unparent(OBJECT(s->dma_mr)); 1501 } 1502 1503 sdhci_uninitfn(s); 1504 } 1505 1506 static void sdhci_sysbus_realize(DeviceState *dev, Error **errp) 1507 { 1508 ERRP_GUARD(); 1509 SDHCIState *s = SYSBUS_SDHCI(dev); 1510 SysBusDevice *sbd = SYS_BUS_DEVICE(dev); 1511 1512 sdhci_common_realize(s, errp); 1513 if (*errp) { 1514 return; 1515 } 1516 1517 if (s->dma_mr) { 1518 s->dma_as = &s->sysbus_dma_as; 1519 address_space_init(s->dma_as, s->dma_mr, "sdhci-dma"); 1520 } else { 1521 /* use system_memory() if property "dma" not set */ 1522 s->dma_as = &address_space_memory; 1523 } 1524 1525 sysbus_init_irq(sbd, &s->irq); 1526 1527 sysbus_init_mmio(sbd, &s->iomem); 1528 } 1529 1530 static void sdhci_sysbus_unrealize(DeviceState *dev) 1531 { 1532 SDHCIState *s = SYSBUS_SDHCI(dev); 1533 1534 sdhci_common_unrealize(s); 1535 1536 if (s->dma_mr) { 1537 address_space_destroy(s->dma_as); 1538 } 1539 } 1540 1541 static void sdhci_sysbus_class_init(ObjectClass *klass, void *data) 1542 { 1543 DeviceClass *dc = DEVICE_CLASS(klass); 1544 1545 device_class_set_props(dc, sdhci_sysbus_properties); 1546 dc->realize = sdhci_sysbus_realize; 1547 dc->unrealize = sdhci_sysbus_unrealize; 1548 1549 sdhci_common_class_init(klass, data); 1550 } 1551 1552 static const TypeInfo sdhci_sysbus_info = { 1553 .name = TYPE_SYSBUS_SDHCI, 1554 .parent = TYPE_SYS_BUS_DEVICE, 1555 .instance_size = sizeof(SDHCIState), 1556 .instance_init = sdhci_sysbus_init, 1557 .instance_finalize = sdhci_sysbus_finalize, 1558 .class_init = sdhci_sysbus_class_init, 1559 }; 1560 1561 /* --- qdev bus master --- */ 1562 1563 static void sdhci_bus_class_init(ObjectClass *klass, void *data) 1564 { 1565 SDBusClass *sbc = SD_BUS_CLASS(klass); 1566 1567 sbc->set_inserted = sdhci_set_inserted; 1568 sbc->set_readonly = sdhci_set_readonly; 1569 } 1570 1571 static const TypeInfo sdhci_bus_info = { 1572 .name = TYPE_SDHCI_BUS, 1573 .parent = TYPE_SD_BUS, 1574 .instance_size = sizeof(SDBus), 1575 .class_init = sdhci_bus_class_init, 1576 }; 1577 1578 /* --- qdev i.MX eSDHC --- */ 1579 1580 #define USDHC_MIX_CTRL 0x48 1581 1582 #define USDHC_VENDOR_SPEC 0xc0 1583 #define USDHC_IMX_FRC_SDCLK_ON (1 << 8) 1584 1585 #define USDHC_DLL_CTRL 0x60 1586 1587 #define USDHC_TUNING_CTRL 0xcc 1588 #define USDHC_TUNE_CTRL_STATUS 0x68 1589 #define USDHC_WTMK_LVL 0x44 1590 1591 /* Undocumented register used by guests working around erratum ERR004536 */ 1592 #define USDHC_UNDOCUMENTED_REG27 0x6c 1593 1594 #define USDHC_CTRL_4BITBUS (0x1 << 1) 1595 #define USDHC_CTRL_8BITBUS (0x2 << 1) 1596 1597 #define USDHC_PRNSTS_SDSTB (1 << 3) 1598 1599 static uint64_t usdhc_read(void *opaque, hwaddr offset, unsigned size) 1600 { 1601 SDHCIState *s = SYSBUS_SDHCI(opaque); 1602 uint32_t ret; 1603 uint16_t hostctl1; 1604 1605 switch (offset) { 1606 default: 1607 return sdhci_read(opaque, offset, size); 1608 1609 case SDHC_HOSTCTL: 1610 /* 1611 * For a detailed explanation on the following bit 1612 * manipulation code see comments in a similar part of 1613 * usdhc_write() 1614 */ 1615 hostctl1 = SDHC_DMA_TYPE(s->hostctl1) << (8 - 3); 1616 1617 if (s->hostctl1 & SDHC_CTRL_8BITBUS) { 1618 hostctl1 |= USDHC_CTRL_8BITBUS; 1619 } 1620 1621 if (s->hostctl1 & SDHC_CTRL_4BITBUS) { 1622 hostctl1 |= USDHC_CTRL_4BITBUS; 1623 } 1624 1625 ret = hostctl1; 1626 ret |= (uint32_t)s->blkgap << 16; 1627 ret |= (uint32_t)s->wakcon << 24; 1628 1629 break; 1630 1631 case SDHC_PRNSTS: 1632 /* Add SDSTB (SD Clock Stable) bit to PRNSTS */ 1633 ret = sdhci_read(opaque, offset, size) & ~USDHC_PRNSTS_SDSTB; 1634 if (s->clkcon & SDHC_CLOCK_INT_STABLE) { 1635 ret |= USDHC_PRNSTS_SDSTB; 1636 } 1637 break; 1638 1639 case USDHC_VENDOR_SPEC: 1640 ret = s->vendor_spec; 1641 break; 1642 case USDHC_DLL_CTRL: 1643 case USDHC_TUNE_CTRL_STATUS: 1644 case USDHC_UNDOCUMENTED_REG27: 1645 case USDHC_TUNING_CTRL: 1646 case USDHC_MIX_CTRL: 1647 case USDHC_WTMK_LVL: 1648 ret = 0; 1649 break; 1650 } 1651 1652 return ret; 1653 } 1654 1655 static void 1656 usdhc_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) 1657 { 1658 SDHCIState *s = SYSBUS_SDHCI(opaque); 1659 uint8_t hostctl1; 1660 uint32_t value = (uint32_t)val; 1661 1662 switch (offset) { 1663 case USDHC_DLL_CTRL: 1664 case USDHC_TUNE_CTRL_STATUS: 1665 case USDHC_UNDOCUMENTED_REG27: 1666 case USDHC_TUNING_CTRL: 1667 case USDHC_WTMK_LVL: 1668 break; 1669 1670 case USDHC_VENDOR_SPEC: 1671 s->vendor_spec = value; 1672 switch (s->vendor) { 1673 case SDHCI_VENDOR_IMX: 1674 if (value & USDHC_IMX_FRC_SDCLK_ON) { 1675 s->prnsts &= ~SDHC_IMX_CLOCK_GATE_OFF; 1676 } else { 1677 s->prnsts |= SDHC_IMX_CLOCK_GATE_OFF; 1678 } 1679 break; 1680 default: 1681 break; 1682 } 1683 break; 1684 1685 case SDHC_HOSTCTL: 1686 /* 1687 * Here's What ESDHCI has at offset 0x28 (SDHC_HOSTCTL) 1688 * 1689 * 7 6 5 4 3 2 1 0 1690 * |-----------+--------+--------+-----------+----------+---------| 1691 * | Card | Card | Endian | DATA3 | Data | Led | 1692 * | Detect | Detect | Mode | as Card | Transfer | Control | 1693 * | Signal | Test | | Detection | Width | | 1694 * | Selection | Level | | Pin | | | 1695 * |-----------+--------+--------+-----------+----------+---------| 1696 * 1697 * and 0x29 1698 * 1699 * 15 10 9 8 1700 * |----------+------| 1701 * | Reserved | DMA | 1702 * | | Sel. | 1703 * | | | 1704 * |----------+------| 1705 * 1706 * and here's what SDCHI spec expects those offsets to be: 1707 * 1708 * 0x28 (Host Control Register) 1709 * 1710 * 7 6 5 4 3 2 1 0 1711 * |--------+--------+----------+------+--------+----------+---------| 1712 * | Card | Card | Extended | DMA | High | Data | LED | 1713 * | Detect | Detect | Data | Sel. | Speed | Transfer | Control | 1714 * | Signal | Test | Transfer | | Enable | Width | | 1715 * | Sel. | Level | Width | | | | | 1716 * |--------+--------+----------+------+--------+----------+---------| 1717 * 1718 * and 0x29 (Power Control Register) 1719 * 1720 * |----------------------------------| 1721 * | Power Control Register | 1722 * | | 1723 * | Description omitted, | 1724 * | since it has no analog in ESDHCI | 1725 * | | 1726 * |----------------------------------| 1727 * 1728 * Since offsets 0x2A and 0x2B should be compatible between 1729 * both IP specs we only need to reconcile least 16-bit of the 1730 * word we've been given. 1731 */ 1732 1733 /* 1734 * First, save bits 7 6 and 0 since they are identical 1735 */ 1736 hostctl1 = value & (SDHC_CTRL_LED | 1737 SDHC_CTRL_CDTEST_INS | 1738 SDHC_CTRL_CDTEST_EN); 1739 /* 1740 * Second, split "Data Transfer Width" from bits 2 and 1 in to 1741 * bits 5 and 1 1742 */ 1743 if (value & USDHC_CTRL_8BITBUS) { 1744 hostctl1 |= SDHC_CTRL_8BITBUS; 1745 } 1746 1747 if (value & USDHC_CTRL_4BITBUS) { 1748 hostctl1 |= USDHC_CTRL_4BITBUS; 1749 } 1750 1751 /* 1752 * Third, move DMA select from bits 9 and 8 to bits 4 and 3 1753 */ 1754 hostctl1 |= SDHC_DMA_TYPE(value >> (8 - 3)); 1755 1756 /* 1757 * Now place the corrected value into low 16-bit of the value 1758 * we are going to give standard SDHCI write function 1759 * 1760 * NOTE: This transformation should be the inverse of what can 1761 * be found in drivers/mmc/host/sdhci-esdhc-imx.c in Linux 1762 * kernel 1763 */ 1764 value &= ~UINT16_MAX; 1765 value |= hostctl1; 1766 value |= (uint16_t)s->pwrcon << 8; 1767 1768 sdhci_write(opaque, offset, value, size); 1769 break; 1770 1771 case USDHC_MIX_CTRL: 1772 /* 1773 * So, when SD/MMC stack in Linux tries to write to "Transfer 1774 * Mode Register", ESDHC i.MX quirk code will translate it 1775 * into a write to ESDHC_MIX_CTRL, so we do the opposite in 1776 * order to get where we started 1777 * 1778 * Note that Auto CMD23 Enable bit is located in a wrong place 1779 * on i.MX, but since it is not used by QEMU we do not care. 1780 * 1781 * We don't want to call sdhci_write(.., SDHC_TRNMOD, ...) 1782 * here becuase it will result in a call to 1783 * sdhci_send_command(s) which we don't want. 1784 * 1785 */ 1786 s->trnmod = value & UINT16_MAX; 1787 break; 1788 case SDHC_TRNMOD: 1789 /* 1790 * Similar to above, but this time a write to "Command 1791 * Register" will be translated into a 4-byte write to 1792 * "Transfer Mode register" where lower 16-bit of value would 1793 * be set to zero. So what we do is fill those bits with 1794 * cached value from s->trnmod and let the SDHCI 1795 * infrastructure handle the rest 1796 */ 1797 sdhci_write(opaque, offset, val | s->trnmod, size); 1798 break; 1799 case SDHC_BLKSIZE: 1800 /* 1801 * ESDHCI does not implement "Host SDMA Buffer Boundary", and 1802 * Linux driver will try to zero this field out which will 1803 * break the rest of SDHCI emulation. 1804 * 1805 * Linux defaults to maximum possible setting (512K boundary) 1806 * and it seems to be the only option that i.MX IP implements, 1807 * so we artificially set it to that value. 1808 */ 1809 val |= 0x7 << 12; 1810 /* FALLTHROUGH */ 1811 default: 1812 sdhci_write(opaque, offset, val, size); 1813 break; 1814 } 1815 } 1816 1817 static const MemoryRegionOps usdhc_mmio_ops = { 1818 .read = usdhc_read, 1819 .write = usdhc_write, 1820 .valid = { 1821 .min_access_size = 1, 1822 .max_access_size = 4, 1823 .unaligned = false 1824 }, 1825 .endianness = DEVICE_LITTLE_ENDIAN, 1826 }; 1827 1828 static void imx_usdhc_init(Object *obj) 1829 { 1830 SDHCIState *s = SYSBUS_SDHCI(obj); 1831 1832 s->io_ops = &usdhc_mmio_ops; 1833 s->quirks = SDHCI_QUIRK_NO_BUSY_IRQ; 1834 } 1835 1836 static const TypeInfo imx_usdhc_info = { 1837 .name = TYPE_IMX_USDHC, 1838 .parent = TYPE_SYSBUS_SDHCI, 1839 .instance_init = imx_usdhc_init, 1840 }; 1841 1842 /* --- qdev Samsung s3c --- */ 1843 1844 #define S3C_SDHCI_CONTROL2 0x80 1845 #define S3C_SDHCI_CONTROL3 0x84 1846 #define S3C_SDHCI_CONTROL4 0x8c 1847 1848 static uint64_t sdhci_s3c_read(void *opaque, hwaddr offset, unsigned size) 1849 { 1850 uint64_t ret; 1851 1852 switch (offset) { 1853 case S3C_SDHCI_CONTROL2: 1854 case S3C_SDHCI_CONTROL3: 1855 case S3C_SDHCI_CONTROL4: 1856 /* ignore */ 1857 ret = 0; 1858 break; 1859 default: 1860 ret = sdhci_read(opaque, offset, size); 1861 break; 1862 } 1863 1864 return ret; 1865 } 1866 1867 static void sdhci_s3c_write(void *opaque, hwaddr offset, uint64_t val, 1868 unsigned size) 1869 { 1870 switch (offset) { 1871 case S3C_SDHCI_CONTROL2: 1872 case S3C_SDHCI_CONTROL3: 1873 case S3C_SDHCI_CONTROL4: 1874 /* ignore */ 1875 break; 1876 default: 1877 sdhci_write(opaque, offset, val, size); 1878 break; 1879 } 1880 } 1881 1882 static const MemoryRegionOps sdhci_s3c_mmio_ops = { 1883 .read = sdhci_s3c_read, 1884 .write = sdhci_s3c_write, 1885 .valid = { 1886 .min_access_size = 1, 1887 .max_access_size = 4, 1888 .unaligned = false 1889 }, 1890 .endianness = DEVICE_LITTLE_ENDIAN, 1891 }; 1892 1893 static void sdhci_s3c_init(Object *obj) 1894 { 1895 SDHCIState *s = SYSBUS_SDHCI(obj); 1896 1897 s->io_ops = &sdhci_s3c_mmio_ops; 1898 } 1899 1900 static const TypeInfo sdhci_s3c_info = { 1901 .name = TYPE_S3C_SDHCI , 1902 .parent = TYPE_SYSBUS_SDHCI, 1903 .instance_init = sdhci_s3c_init, 1904 }; 1905 1906 static void sdhci_register_types(void) 1907 { 1908 type_register_static(&sdhci_sysbus_info); 1909 type_register_static(&sdhci_bus_info); 1910 type_register_static(&imx_usdhc_info); 1911 type_register_static(&sdhci_s3c_info); 1912 } 1913 1914 type_init(sdhci_register_types) 1915