1 /* 2 * SD Association Host Standard Specification v2.0 controller emulation 3 * 4 * Datasheet: PartA2_SD_Host_Controller_Simplified_Specification_Ver2.00.pdf 5 * 6 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 7 * Mitsyanko Igor <i.mitsyanko@samsung.com> 8 * Peter A.G. Crosthwaite <peter.crosthwaite@petalogix.com> 9 * 10 * Based on MMC controller for Samsung S5PC1xx-based board emulation 11 * by Alexey Merkulov and Vladimir Monakhov. 12 * 13 * This program is free software; you can redistribute it and/or modify it 14 * under the terms of the GNU General Public License as published by the 15 * Free Software Foundation; either version 2 of the License, or (at your 16 * option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 21 * See the GNU General Public License for more details. 22 * 23 * You should have received a copy of the GNU General Public License along 24 * with this program; if not, see <http://www.gnu.org/licenses/>. 25 */ 26 27 #include "qemu/osdep.h" 28 #include "qemu/units.h" 29 #include "qemu/error-report.h" 30 #include "qapi/error.h" 31 #include "hw/irq.h" 32 #include "hw/qdev-properties.h" 33 #include "sysemu/dma.h" 34 #include "qemu/timer.h" 35 #include "qemu/bitops.h" 36 #include "hw/sd/sdhci.h" 37 #include "migration/vmstate.h" 38 #include "sdhci-internal.h" 39 #include "qemu/log.h" 40 #include "trace.h" 41 #include "qom/object.h" 42 43 #define TYPE_SDHCI_BUS "sdhci-bus" 44 /* This is reusing the SDBus typedef from SD_BUS */ 45 DECLARE_INSTANCE_CHECKER(SDBus, SDHCI_BUS, 46 TYPE_SDHCI_BUS) 47 48 #define MASKED_WRITE(reg, mask, val) (reg = (reg & (mask)) | (val)) 49 50 static inline unsigned int sdhci_get_fifolen(SDHCIState *s) 51 { 52 return 1 << (9 + FIELD_EX32(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH)); 53 } 54 55 /* return true on error */ 56 static bool sdhci_check_capab_freq_range(SDHCIState *s, const char *desc, 57 uint8_t freq, Error **errp) 58 { 59 if (s->sd_spec_version >= 3) { 60 return false; 61 } 62 switch (freq) { 63 case 0: 64 case 10 ... 63: 65 break; 66 default: 67 error_setg(errp, "SD %s clock frequency can have value" 68 "in range 0-63 only", desc); 69 return true; 70 } 71 return false; 72 } 73 74 static void sdhci_check_capareg(SDHCIState *s, Error **errp) 75 { 76 uint64_t msk = s->capareg; 77 uint32_t val; 78 bool y; 79 80 switch (s->sd_spec_version) { 81 case 4: 82 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS64BIT_V4); 83 trace_sdhci_capareg("64-bit system bus (v4)", val); 84 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS64BIT_V4, 0); 85 86 val = FIELD_EX64(s->capareg, SDHC_CAPAB, UHS_II); 87 trace_sdhci_capareg("UHS-II", val); 88 msk = FIELD_DP64(msk, SDHC_CAPAB, UHS_II, 0); 89 90 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA3); 91 trace_sdhci_capareg("ADMA3", val); 92 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA3, 0); 93 94 /* fallthrough */ 95 case 3: 96 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ASYNC_INT); 97 trace_sdhci_capareg("async interrupt", val); 98 msk = FIELD_DP64(msk, SDHC_CAPAB, ASYNC_INT, 0); 99 100 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SLOT_TYPE); 101 if (val) { 102 error_setg(errp, "slot-type not supported"); 103 return; 104 } 105 trace_sdhci_capareg("slot type", val); 106 msk = FIELD_DP64(msk, SDHC_CAPAB, SLOT_TYPE, 0); 107 108 if (val != 2) { 109 val = FIELD_EX64(s->capareg, SDHC_CAPAB, EMBEDDED_8BIT); 110 trace_sdhci_capareg("8-bit bus", val); 111 } 112 msk = FIELD_DP64(msk, SDHC_CAPAB, EMBEDDED_8BIT, 0); 113 114 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS_SPEED); 115 trace_sdhci_capareg("bus speed mask", val); 116 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS_SPEED, 0); 117 118 val = FIELD_EX64(s->capareg, SDHC_CAPAB, DRIVER_STRENGTH); 119 trace_sdhci_capareg("driver strength mask", val); 120 msk = FIELD_DP64(msk, SDHC_CAPAB, DRIVER_STRENGTH, 0); 121 122 val = FIELD_EX64(s->capareg, SDHC_CAPAB, TIMER_RETUNING); 123 trace_sdhci_capareg("timer re-tuning", val); 124 msk = FIELD_DP64(msk, SDHC_CAPAB, TIMER_RETUNING, 0); 125 126 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SDR50_TUNING); 127 trace_sdhci_capareg("use SDR50 tuning", val); 128 msk = FIELD_DP64(msk, SDHC_CAPAB, SDR50_TUNING, 0); 129 130 val = FIELD_EX64(s->capareg, SDHC_CAPAB, RETUNING_MODE); 131 trace_sdhci_capareg("re-tuning mode", val); 132 msk = FIELD_DP64(msk, SDHC_CAPAB, RETUNING_MODE, 0); 133 134 val = FIELD_EX64(s->capareg, SDHC_CAPAB, CLOCK_MULT); 135 trace_sdhci_capareg("clock multiplier", val); 136 msk = FIELD_DP64(msk, SDHC_CAPAB, CLOCK_MULT, 0); 137 138 /* fallthrough */ 139 case 2: /* default version */ 140 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA2); 141 trace_sdhci_capareg("ADMA2", val); 142 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA2, 0); 143 144 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA1); 145 trace_sdhci_capareg("ADMA1", val); 146 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA1, 0); 147 148 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS64BIT); 149 trace_sdhci_capareg("64-bit system bus (v3)", val); 150 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS64BIT, 0); 151 152 /* fallthrough */ 153 case 1: 154 y = FIELD_EX64(s->capareg, SDHC_CAPAB, TOUNIT); 155 msk = FIELD_DP64(msk, SDHC_CAPAB, TOUNIT, 0); 156 157 val = FIELD_EX64(s->capareg, SDHC_CAPAB, TOCLKFREQ); 158 trace_sdhci_capareg(y ? "timeout (MHz)" : "Timeout (KHz)", val); 159 if (sdhci_check_capab_freq_range(s, "timeout", val, errp)) { 160 return; 161 } 162 msk = FIELD_DP64(msk, SDHC_CAPAB, TOCLKFREQ, 0); 163 164 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BASECLKFREQ); 165 trace_sdhci_capareg(y ? "base (MHz)" : "Base (KHz)", val); 166 if (sdhci_check_capab_freq_range(s, "base", val, errp)) { 167 return; 168 } 169 msk = FIELD_DP64(msk, SDHC_CAPAB, BASECLKFREQ, 0); 170 171 val = FIELD_EX64(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH); 172 if (val >= 3) { 173 error_setg(errp, "block size can be 512, 1024 or 2048 only"); 174 return; 175 } 176 trace_sdhci_capareg("max block length", sdhci_get_fifolen(s)); 177 msk = FIELD_DP64(msk, SDHC_CAPAB, MAXBLOCKLENGTH, 0); 178 179 val = FIELD_EX64(s->capareg, SDHC_CAPAB, HIGHSPEED); 180 trace_sdhci_capareg("high speed", val); 181 msk = FIELD_DP64(msk, SDHC_CAPAB, HIGHSPEED, 0); 182 183 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SDMA); 184 trace_sdhci_capareg("SDMA", val); 185 msk = FIELD_DP64(msk, SDHC_CAPAB, SDMA, 0); 186 187 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SUSPRESUME); 188 trace_sdhci_capareg("suspend/resume", val); 189 msk = FIELD_DP64(msk, SDHC_CAPAB, SUSPRESUME, 0); 190 191 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V33); 192 trace_sdhci_capareg("3.3v", val); 193 msk = FIELD_DP64(msk, SDHC_CAPAB, V33, 0); 194 195 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V30); 196 trace_sdhci_capareg("3.0v", val); 197 msk = FIELD_DP64(msk, SDHC_CAPAB, V30, 0); 198 199 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V18); 200 trace_sdhci_capareg("1.8v", val); 201 msk = FIELD_DP64(msk, SDHC_CAPAB, V18, 0); 202 break; 203 204 default: 205 error_setg(errp, "Unsupported spec version: %u", s->sd_spec_version); 206 } 207 if (msk) { 208 qemu_log_mask(LOG_UNIMP, 209 "SDHCI: unknown CAPAB mask: 0x%016" PRIx64 "\n", msk); 210 } 211 } 212 213 static uint8_t sdhci_slotint(SDHCIState *s) 214 { 215 return (s->norintsts & s->norintsigen) || (s->errintsts & s->errintsigen) || 216 ((s->norintsts & SDHC_NIS_INSERT) && (s->wakcon & SDHC_WKUP_ON_INS)) || 217 ((s->norintsts & SDHC_NIS_REMOVE) && (s->wakcon & SDHC_WKUP_ON_RMV)); 218 } 219 220 /* Return true if IRQ was pending and delivered */ 221 static bool sdhci_update_irq(SDHCIState *s) 222 { 223 bool pending = sdhci_slotint(s); 224 225 qemu_set_irq(s->irq, pending); 226 227 return pending; 228 } 229 230 static void sdhci_raise_insertion_irq(void *opaque) 231 { 232 SDHCIState *s = (SDHCIState *)opaque; 233 234 if (s->norintsts & SDHC_NIS_REMOVE) { 235 timer_mod(s->insert_timer, 236 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY); 237 } else { 238 s->prnsts = 0x1ff0000; 239 if (s->norintstsen & SDHC_NISEN_INSERT) { 240 s->norintsts |= SDHC_NIS_INSERT; 241 } 242 sdhci_update_irq(s); 243 } 244 } 245 246 static void sdhci_set_inserted(DeviceState *dev, bool level) 247 { 248 SDHCIState *s = (SDHCIState *)dev; 249 250 trace_sdhci_set_inserted(level ? "insert" : "eject"); 251 if ((s->norintsts & SDHC_NIS_REMOVE) && level) { 252 /* Give target some time to notice card ejection */ 253 timer_mod(s->insert_timer, 254 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY); 255 } else { 256 if (level) { 257 s->prnsts = 0x1ff0000; 258 if (s->norintstsen & SDHC_NISEN_INSERT) { 259 s->norintsts |= SDHC_NIS_INSERT; 260 } 261 } else { 262 s->prnsts = 0x1fa0000; 263 s->pwrcon &= ~SDHC_POWER_ON; 264 s->clkcon &= ~SDHC_CLOCK_SDCLK_EN; 265 if (s->norintstsen & SDHC_NISEN_REMOVE) { 266 s->norintsts |= SDHC_NIS_REMOVE; 267 } 268 } 269 sdhci_update_irq(s); 270 } 271 } 272 273 static void sdhci_set_readonly(DeviceState *dev, bool level) 274 { 275 SDHCIState *s = (SDHCIState *)dev; 276 277 if (level) { 278 s->prnsts &= ~SDHC_WRITE_PROTECT; 279 } else { 280 /* Write enabled */ 281 s->prnsts |= SDHC_WRITE_PROTECT; 282 } 283 } 284 285 static void sdhci_reset(SDHCIState *s) 286 { 287 DeviceState *dev = DEVICE(s); 288 289 timer_del(s->insert_timer); 290 timer_del(s->transfer_timer); 291 292 /* Set all registers to 0. Capabilities/Version registers are not cleared 293 * and assumed to always preserve their value, given to them during 294 * initialization */ 295 memset(&s->sdmasysad, 0, (uintptr_t)&s->capareg - (uintptr_t)&s->sdmasysad); 296 297 /* Reset other state based on current card insertion/readonly status */ 298 sdhci_set_inserted(dev, sdbus_get_inserted(&s->sdbus)); 299 sdhci_set_readonly(dev, sdbus_get_readonly(&s->sdbus)); 300 301 s->data_count = 0; 302 s->stopped_state = sdhc_not_stopped; 303 s->pending_insert_state = false; 304 } 305 306 static void sdhci_poweron_reset(DeviceState *dev) 307 { 308 /* QOM (ie power-on) reset. This is identical to reset 309 * commanded via device register apart from handling of the 310 * 'pending insert on powerup' quirk. 311 */ 312 SDHCIState *s = (SDHCIState *)dev; 313 314 sdhci_reset(s); 315 316 if (s->pending_insert_quirk) { 317 s->pending_insert_state = true; 318 } 319 } 320 321 static void sdhci_data_transfer(void *opaque); 322 323 #define BLOCK_SIZE_MASK (4 * KiB - 1) 324 325 static void sdhci_send_command(SDHCIState *s) 326 { 327 SDRequest request; 328 uint8_t response[16]; 329 int rlen; 330 bool timeout = false; 331 332 s->errintsts = 0; 333 s->acmd12errsts = 0; 334 request.cmd = s->cmdreg >> 8; 335 request.arg = s->argument; 336 337 trace_sdhci_send_command(request.cmd, request.arg); 338 rlen = sdbus_do_command(&s->sdbus, &request, response); 339 340 if (s->cmdreg & SDHC_CMD_RESPONSE) { 341 if (rlen == 4) { 342 s->rspreg[0] = ldl_be_p(response); 343 s->rspreg[1] = s->rspreg[2] = s->rspreg[3] = 0; 344 trace_sdhci_response4(s->rspreg[0]); 345 } else if (rlen == 16) { 346 s->rspreg[0] = ldl_be_p(&response[11]); 347 s->rspreg[1] = ldl_be_p(&response[7]); 348 s->rspreg[2] = ldl_be_p(&response[3]); 349 s->rspreg[3] = (response[0] << 16) | (response[1] << 8) | 350 response[2]; 351 trace_sdhci_response16(s->rspreg[3], s->rspreg[2], 352 s->rspreg[1], s->rspreg[0]); 353 } else { 354 timeout = true; 355 trace_sdhci_error("timeout waiting for command response"); 356 if (s->errintstsen & SDHC_EISEN_CMDTIMEOUT) { 357 s->errintsts |= SDHC_EIS_CMDTIMEOUT; 358 s->norintsts |= SDHC_NIS_ERR; 359 } 360 } 361 362 if (!(s->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) && 363 (s->norintstsen & SDHC_NISEN_TRSCMP) && 364 (s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY) { 365 s->norintsts |= SDHC_NIS_TRSCMP; 366 } 367 } 368 369 if (s->norintstsen & SDHC_NISEN_CMDCMP) { 370 s->norintsts |= SDHC_NIS_CMDCMP; 371 } 372 373 sdhci_update_irq(s); 374 375 if (!timeout && (s->blksize & BLOCK_SIZE_MASK) && 376 (s->cmdreg & SDHC_CMD_DATA_PRESENT)) { 377 s->data_count = 0; 378 sdhci_data_transfer(s); 379 } 380 } 381 382 static void sdhci_end_transfer(SDHCIState *s) 383 { 384 /* Automatically send CMD12 to stop transfer if AutoCMD12 enabled */ 385 if ((s->trnmod & SDHC_TRNS_ACMD12) != 0) { 386 SDRequest request; 387 uint8_t response[16]; 388 389 request.cmd = 0x0C; 390 request.arg = 0; 391 trace_sdhci_end_transfer(request.cmd, request.arg); 392 sdbus_do_command(&s->sdbus, &request, response); 393 /* Auto CMD12 response goes to the upper Response register */ 394 s->rspreg[3] = ldl_be_p(response); 395 } 396 397 s->prnsts &= ~(SDHC_DOING_READ | SDHC_DOING_WRITE | 398 SDHC_DAT_LINE_ACTIVE | SDHC_DATA_INHIBIT | 399 SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE); 400 401 if (s->norintstsen & SDHC_NISEN_TRSCMP) { 402 s->norintsts |= SDHC_NIS_TRSCMP; 403 } 404 405 sdhci_update_irq(s); 406 } 407 408 /* 409 * Programmed i/o data transfer 410 */ 411 412 /* Fill host controller's read buffer with BLKSIZE bytes of data from card */ 413 static void sdhci_read_block_from_card(SDHCIState *s) 414 { 415 const uint16_t blk_size = s->blksize & BLOCK_SIZE_MASK; 416 417 if ((s->trnmod & SDHC_TRNS_MULTI) && 418 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) { 419 return; 420 } 421 422 if (!FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) { 423 /* Device is not in tuning */ 424 sdbus_read_data(&s->sdbus, s->fifo_buffer, blk_size); 425 } 426 427 if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) { 428 /* Device is in tuning */ 429 s->hostctl2 &= ~R_SDHC_HOSTCTL2_EXECUTE_TUNING_MASK; 430 s->hostctl2 |= R_SDHC_HOSTCTL2_SAMPLING_CLKSEL_MASK; 431 s->prnsts &= ~(SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ | 432 SDHC_DATA_INHIBIT); 433 goto read_done; 434 } 435 436 /* New data now available for READ through Buffer Port Register */ 437 s->prnsts |= SDHC_DATA_AVAILABLE; 438 if (s->norintstsen & SDHC_NISEN_RBUFRDY) { 439 s->norintsts |= SDHC_NIS_RBUFRDY; 440 } 441 442 /* Clear DAT line active status if that was the last block */ 443 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 || 444 ((s->trnmod & SDHC_TRNS_MULTI) && s->blkcnt == 1)) { 445 s->prnsts &= ~SDHC_DAT_LINE_ACTIVE; 446 } 447 448 /* If stop at block gap request was set and it's not the last block of 449 * data - generate Block Event interrupt */ 450 if (s->stopped_state == sdhc_gap_read && (s->trnmod & SDHC_TRNS_MULTI) && 451 s->blkcnt != 1) { 452 s->prnsts &= ~SDHC_DAT_LINE_ACTIVE; 453 if (s->norintstsen & SDHC_EISEN_BLKGAP) { 454 s->norintsts |= SDHC_EIS_BLKGAP; 455 } 456 } 457 458 read_done: 459 sdhci_update_irq(s); 460 } 461 462 /* Read @size byte of data from host controller @s BUFFER DATA PORT register */ 463 static uint32_t sdhci_read_dataport(SDHCIState *s, unsigned size) 464 { 465 uint32_t value = 0; 466 int i; 467 468 /* first check that a valid data exists in host controller input buffer */ 469 if ((s->prnsts & SDHC_DATA_AVAILABLE) == 0) { 470 trace_sdhci_error("read from empty buffer"); 471 return 0; 472 } 473 474 for (i = 0; i < size; i++) { 475 assert(s->data_count < s->buf_maxsz); 476 value |= s->fifo_buffer[s->data_count] << i * 8; 477 s->data_count++; 478 /* check if we've read all valid data (blksize bytes) from buffer */ 479 if ((s->data_count) >= (s->blksize & BLOCK_SIZE_MASK)) { 480 trace_sdhci_read_dataport(s->data_count); 481 s->prnsts &= ~SDHC_DATA_AVAILABLE; /* no more data in a buffer */ 482 s->data_count = 0; /* next buff read must start at position [0] */ 483 484 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 485 s->blkcnt--; 486 } 487 488 /* if that was the last block of data */ 489 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 || 490 ((s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) || 491 /* stop at gap request */ 492 (s->stopped_state == sdhc_gap_read && 493 !(s->prnsts & SDHC_DAT_LINE_ACTIVE))) { 494 sdhci_end_transfer(s); 495 } else { /* if there are more data, read next block from card */ 496 sdhci_read_block_from_card(s); 497 } 498 break; 499 } 500 } 501 502 return value; 503 } 504 505 /* Write data from host controller FIFO to card */ 506 static void sdhci_write_block_to_card(SDHCIState *s) 507 { 508 if (s->prnsts & SDHC_SPACE_AVAILABLE) { 509 if (s->norintstsen & SDHC_NISEN_WBUFRDY) { 510 s->norintsts |= SDHC_NIS_WBUFRDY; 511 } 512 sdhci_update_irq(s); 513 return; 514 } 515 516 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 517 if (s->blkcnt == 0) { 518 return; 519 } else { 520 s->blkcnt--; 521 } 522 } 523 524 sdbus_write_data(&s->sdbus, s->fifo_buffer, s->blksize & BLOCK_SIZE_MASK); 525 526 /* Next data can be written through BUFFER DATORT register */ 527 s->prnsts |= SDHC_SPACE_AVAILABLE; 528 529 /* Finish transfer if that was the last block of data */ 530 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 || 531 ((s->trnmod & SDHC_TRNS_MULTI) && 532 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0))) { 533 sdhci_end_transfer(s); 534 } else if (s->norintstsen & SDHC_NISEN_WBUFRDY) { 535 s->norintsts |= SDHC_NIS_WBUFRDY; 536 } 537 538 /* Generate Block Gap Event if requested and if not the last block */ 539 if (s->stopped_state == sdhc_gap_write && (s->trnmod & SDHC_TRNS_MULTI) && 540 s->blkcnt > 0) { 541 s->prnsts &= ~SDHC_DOING_WRITE; 542 if (s->norintstsen & SDHC_EISEN_BLKGAP) { 543 s->norintsts |= SDHC_EIS_BLKGAP; 544 } 545 sdhci_end_transfer(s); 546 } 547 548 sdhci_update_irq(s); 549 } 550 551 /* Write @size bytes of @value data to host controller @s Buffer Data Port 552 * register */ 553 static void sdhci_write_dataport(SDHCIState *s, uint32_t value, unsigned size) 554 { 555 unsigned i; 556 557 /* Check that there is free space left in a buffer */ 558 if (!(s->prnsts & SDHC_SPACE_AVAILABLE)) { 559 trace_sdhci_error("Can't write to data buffer: buffer full"); 560 return; 561 } 562 563 for (i = 0; i < size; i++) { 564 assert(s->data_count < s->buf_maxsz); 565 s->fifo_buffer[s->data_count] = value & 0xFF; 566 s->data_count++; 567 value >>= 8; 568 if (s->data_count >= (s->blksize & BLOCK_SIZE_MASK)) { 569 trace_sdhci_write_dataport(s->data_count); 570 s->data_count = 0; 571 s->prnsts &= ~SDHC_SPACE_AVAILABLE; 572 if (s->prnsts & SDHC_DOING_WRITE) { 573 sdhci_write_block_to_card(s); 574 } 575 } 576 } 577 } 578 579 /* 580 * Single DMA data transfer 581 */ 582 583 /* Multi block SDMA transfer */ 584 static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s) 585 { 586 bool page_aligned = false; 587 unsigned int begin; 588 const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK; 589 uint32_t boundary_chk = 1 << (((s->blksize & ~BLOCK_SIZE_MASK) >> 12) + 12); 590 uint32_t boundary_count = boundary_chk - (s->sdmasysad % boundary_chk); 591 592 if (!(s->trnmod & SDHC_TRNS_BLK_CNT_EN) || !s->blkcnt) { 593 qemu_log_mask(LOG_UNIMP, "infinite transfer is not supported\n"); 594 return; 595 } 596 597 /* XXX: Some sd/mmc drivers (for example, u-boot-slp) do not account for 598 * possible stop at page boundary if initial address is not page aligned, 599 * allow them to work properly */ 600 if ((s->sdmasysad % boundary_chk) == 0) { 601 page_aligned = true; 602 } 603 604 s->prnsts |= SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE; 605 if (s->trnmod & SDHC_TRNS_READ) { 606 s->prnsts |= SDHC_DOING_READ; 607 while (s->blkcnt) { 608 if (s->data_count == 0) { 609 sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size); 610 } 611 begin = s->data_count; 612 if (((boundary_count + begin) < block_size) && page_aligned) { 613 s->data_count = boundary_count + begin; 614 boundary_count = 0; 615 } else { 616 s->data_count = block_size; 617 boundary_count -= block_size - begin; 618 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 619 s->blkcnt--; 620 } 621 } 622 dma_memory_write(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin], 623 s->data_count - begin, MEMTXATTRS_UNSPECIFIED); 624 s->sdmasysad += s->data_count - begin; 625 if (s->data_count == block_size) { 626 s->data_count = 0; 627 } 628 if (page_aligned && boundary_count == 0) { 629 break; 630 } 631 } 632 } else { 633 s->prnsts |= SDHC_DOING_WRITE; 634 while (s->blkcnt) { 635 begin = s->data_count; 636 if (((boundary_count + begin) < block_size) && page_aligned) { 637 s->data_count = boundary_count + begin; 638 boundary_count = 0; 639 } else { 640 s->data_count = block_size; 641 boundary_count -= block_size - begin; 642 } 643 dma_memory_read(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin], 644 s->data_count - begin, MEMTXATTRS_UNSPECIFIED); 645 s->sdmasysad += s->data_count - begin; 646 if (s->data_count == block_size) { 647 sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size); 648 s->data_count = 0; 649 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 650 s->blkcnt--; 651 } 652 } 653 if (page_aligned && boundary_count == 0) { 654 break; 655 } 656 } 657 } 658 659 if (s->blkcnt == 0) { 660 sdhci_end_transfer(s); 661 } else { 662 if (s->norintstsen & SDHC_NISEN_DMA) { 663 s->norintsts |= SDHC_NIS_DMA; 664 } 665 sdhci_update_irq(s); 666 } 667 } 668 669 /* single block SDMA transfer */ 670 static void sdhci_sdma_transfer_single_block(SDHCIState *s) 671 { 672 uint32_t datacnt = s->blksize & BLOCK_SIZE_MASK; 673 674 if (s->trnmod & SDHC_TRNS_READ) { 675 sdbus_read_data(&s->sdbus, s->fifo_buffer, datacnt); 676 dma_memory_write(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt, 677 MEMTXATTRS_UNSPECIFIED); 678 } else { 679 dma_memory_read(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt, 680 MEMTXATTRS_UNSPECIFIED); 681 sdbus_write_data(&s->sdbus, s->fifo_buffer, datacnt); 682 } 683 s->blkcnt--; 684 685 sdhci_end_transfer(s); 686 } 687 688 typedef struct ADMADescr { 689 hwaddr addr; 690 uint16_t length; 691 uint8_t attr; 692 uint8_t incr; 693 } ADMADescr; 694 695 static void get_adma_description(SDHCIState *s, ADMADescr *dscr) 696 { 697 uint32_t adma1 = 0; 698 uint64_t adma2 = 0; 699 hwaddr entry_addr = (hwaddr)s->admasysaddr; 700 switch (SDHC_DMA_TYPE(s->hostctl1)) { 701 case SDHC_CTRL_ADMA2_32: 702 dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2), 703 MEMTXATTRS_UNSPECIFIED); 704 adma2 = le64_to_cpu(adma2); 705 /* The spec does not specify endianness of descriptor table. 706 * We currently assume that it is LE. 707 */ 708 dscr->addr = (hwaddr)extract64(adma2, 32, 32) & ~0x3ull; 709 dscr->length = (uint16_t)extract64(adma2, 16, 16); 710 dscr->attr = (uint8_t)extract64(adma2, 0, 7); 711 dscr->incr = 8; 712 break; 713 case SDHC_CTRL_ADMA1_32: 714 dma_memory_read(s->dma_as, entry_addr, &adma1, sizeof(adma1), 715 MEMTXATTRS_UNSPECIFIED); 716 adma1 = le32_to_cpu(adma1); 717 dscr->addr = (hwaddr)(adma1 & 0xFFFFF000); 718 dscr->attr = (uint8_t)extract32(adma1, 0, 7); 719 dscr->incr = 4; 720 if ((dscr->attr & SDHC_ADMA_ATTR_ACT_MASK) == SDHC_ADMA_ATTR_SET_LEN) { 721 dscr->length = (uint16_t)extract32(adma1, 12, 16); 722 } else { 723 dscr->length = 4 * KiB; 724 } 725 break; 726 case SDHC_CTRL_ADMA2_64: 727 dma_memory_read(s->dma_as, entry_addr, &dscr->attr, 1, 728 MEMTXATTRS_UNSPECIFIED); 729 dma_memory_read(s->dma_as, entry_addr + 2, &dscr->length, 2, 730 MEMTXATTRS_UNSPECIFIED); 731 dscr->length = le16_to_cpu(dscr->length); 732 dma_memory_read(s->dma_as, entry_addr + 4, &dscr->addr, 8, 733 MEMTXATTRS_UNSPECIFIED); 734 dscr->addr = le64_to_cpu(dscr->addr); 735 dscr->attr &= (uint8_t) ~0xC0; 736 dscr->incr = 12; 737 break; 738 } 739 } 740 741 /* Advanced DMA data transfer */ 742 743 static void sdhci_do_adma(SDHCIState *s) 744 { 745 unsigned int begin, length; 746 const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK; 747 const MemTxAttrs attrs = { .memory = true }; 748 ADMADescr dscr = {}; 749 MemTxResult res = MEMTX_ERROR; 750 int i; 751 752 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN && !s->blkcnt) { 753 /* Stop Multiple Transfer */ 754 sdhci_end_transfer(s); 755 return; 756 } 757 758 for (i = 0; i < SDHC_ADMA_DESCS_PER_DELAY; ++i) { 759 s->admaerr &= ~SDHC_ADMAERR_LENGTH_MISMATCH; 760 761 get_adma_description(s, &dscr); 762 trace_sdhci_adma_loop(dscr.addr, dscr.length, dscr.attr); 763 764 if ((dscr.attr & SDHC_ADMA_ATTR_VALID) == 0) { 765 /* Indicate that error occurred in ST_FDS state */ 766 s->admaerr &= ~SDHC_ADMAERR_STATE_MASK; 767 s->admaerr |= SDHC_ADMAERR_STATE_ST_FDS; 768 769 /* Generate ADMA error interrupt */ 770 if (s->errintstsen & SDHC_EISEN_ADMAERR) { 771 s->errintsts |= SDHC_EIS_ADMAERR; 772 s->norintsts |= SDHC_NIS_ERR; 773 } 774 775 sdhci_update_irq(s); 776 return; 777 } 778 779 length = dscr.length ? dscr.length : 64 * KiB; 780 781 switch (dscr.attr & SDHC_ADMA_ATTR_ACT_MASK) { 782 case SDHC_ADMA_ATTR_ACT_TRAN: /* data transfer */ 783 s->prnsts |= SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE; 784 if (s->trnmod & SDHC_TRNS_READ) { 785 s->prnsts |= SDHC_DOING_READ; 786 while (length) { 787 if (s->data_count == 0) { 788 sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size); 789 } 790 begin = s->data_count; 791 if ((length + begin) < block_size) { 792 s->data_count = length + begin; 793 length = 0; 794 } else { 795 s->data_count = block_size; 796 length -= block_size - begin; 797 } 798 res = dma_memory_write(s->dma_as, dscr.addr, 799 &s->fifo_buffer[begin], 800 s->data_count - begin, 801 attrs); 802 if (res != MEMTX_OK) { 803 break; 804 } 805 dscr.addr += s->data_count - begin; 806 if (s->data_count == block_size) { 807 s->data_count = 0; 808 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 809 s->blkcnt--; 810 if (s->blkcnt == 0) { 811 break; 812 } 813 } 814 } 815 } 816 } else { 817 s->prnsts |= SDHC_DOING_WRITE; 818 while (length) { 819 begin = s->data_count; 820 if ((length + begin) < block_size) { 821 s->data_count = length + begin; 822 length = 0; 823 } else { 824 s->data_count = block_size; 825 length -= block_size - begin; 826 } 827 res = dma_memory_read(s->dma_as, dscr.addr, 828 &s->fifo_buffer[begin], 829 s->data_count - begin, 830 attrs); 831 if (res != MEMTX_OK) { 832 break; 833 } 834 dscr.addr += s->data_count - begin; 835 if (s->data_count == block_size) { 836 sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size); 837 s->data_count = 0; 838 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 839 s->blkcnt--; 840 if (s->blkcnt == 0) { 841 break; 842 } 843 } 844 } 845 } 846 } 847 if (res != MEMTX_OK) { 848 s->data_count = 0; 849 if (s->errintstsen & SDHC_EISEN_ADMAERR) { 850 trace_sdhci_error("Set ADMA error flag"); 851 s->errintsts |= SDHC_EIS_ADMAERR; 852 s->norintsts |= SDHC_NIS_ERR; 853 } 854 sdhci_update_irq(s); 855 } else { 856 s->admasysaddr += dscr.incr; 857 } 858 break; 859 case SDHC_ADMA_ATTR_ACT_LINK: /* link to next descriptor table */ 860 s->admasysaddr = dscr.addr; 861 trace_sdhci_adma("link", s->admasysaddr); 862 break; 863 default: 864 s->admasysaddr += dscr.incr; 865 break; 866 } 867 868 if (dscr.attr & SDHC_ADMA_ATTR_INT) { 869 trace_sdhci_adma("interrupt", s->admasysaddr); 870 if (s->norintstsen & SDHC_NISEN_DMA) { 871 s->norintsts |= SDHC_NIS_DMA; 872 } 873 874 if (sdhci_update_irq(s) && !(dscr.attr & SDHC_ADMA_ATTR_END)) { 875 /* IRQ delivered, reschedule current transfer */ 876 break; 877 } 878 } 879 880 /* ADMA transfer terminates if blkcnt == 0 or by END attribute */ 881 if (((s->trnmod & SDHC_TRNS_BLK_CNT_EN) && 882 (s->blkcnt == 0)) || (dscr.attr & SDHC_ADMA_ATTR_END)) { 883 trace_sdhci_adma_transfer_completed(); 884 if (length || ((dscr.attr & SDHC_ADMA_ATTR_END) && 885 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && 886 s->blkcnt != 0)) { 887 trace_sdhci_error("SD/MMC host ADMA length mismatch"); 888 s->admaerr |= SDHC_ADMAERR_LENGTH_MISMATCH | 889 SDHC_ADMAERR_STATE_ST_TFR; 890 if (s->errintstsen & SDHC_EISEN_ADMAERR) { 891 trace_sdhci_error("Set ADMA error flag"); 892 s->errintsts |= SDHC_EIS_ADMAERR; 893 s->norintsts |= SDHC_NIS_ERR; 894 } 895 896 sdhci_update_irq(s); 897 } 898 sdhci_end_transfer(s); 899 return; 900 } 901 902 } 903 904 /* we have unfinished business - reschedule to continue ADMA */ 905 timer_mod(s->transfer_timer, 906 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_TRANSFER_DELAY); 907 } 908 909 /* Perform data transfer according to controller configuration */ 910 911 static void sdhci_data_transfer(void *opaque) 912 { 913 SDHCIState *s = (SDHCIState *)opaque; 914 915 if (s->trnmod & SDHC_TRNS_DMA) { 916 switch (SDHC_DMA_TYPE(s->hostctl1)) { 917 case SDHC_CTRL_SDMA: 918 if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) { 919 sdhci_sdma_transfer_single_block(s); 920 } else { 921 sdhci_sdma_transfer_multi_blocks(s); 922 } 923 924 break; 925 case SDHC_CTRL_ADMA1_32: 926 if (!(s->capareg & R_SDHC_CAPAB_ADMA1_MASK)) { 927 trace_sdhci_error("ADMA1 not supported"); 928 break; 929 } 930 931 sdhci_do_adma(s); 932 break; 933 case SDHC_CTRL_ADMA2_32: 934 if (!(s->capareg & R_SDHC_CAPAB_ADMA2_MASK)) { 935 trace_sdhci_error("ADMA2 not supported"); 936 break; 937 } 938 939 sdhci_do_adma(s); 940 break; 941 case SDHC_CTRL_ADMA2_64: 942 if (!(s->capareg & R_SDHC_CAPAB_ADMA2_MASK) || 943 !(s->capareg & R_SDHC_CAPAB_BUS64BIT_MASK)) { 944 trace_sdhci_error("64 bit ADMA not supported"); 945 break; 946 } 947 948 sdhci_do_adma(s); 949 break; 950 default: 951 trace_sdhci_error("Unsupported DMA type"); 952 break; 953 } 954 } else { 955 if ((s->trnmod & SDHC_TRNS_READ) && sdbus_data_ready(&s->sdbus)) { 956 s->prnsts |= SDHC_DOING_READ | SDHC_DATA_INHIBIT | 957 SDHC_DAT_LINE_ACTIVE; 958 sdhci_read_block_from_card(s); 959 } else { 960 s->prnsts |= SDHC_DOING_WRITE | SDHC_DAT_LINE_ACTIVE | 961 SDHC_SPACE_AVAILABLE | SDHC_DATA_INHIBIT; 962 sdhci_write_block_to_card(s); 963 } 964 } 965 } 966 967 static bool sdhci_can_issue_command(SDHCIState *s) 968 { 969 if (!SDHC_CLOCK_IS_ON(s->clkcon) || 970 (((s->prnsts & SDHC_DATA_INHIBIT) || s->stopped_state) && 971 ((s->cmdreg & SDHC_CMD_DATA_PRESENT) || 972 ((s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY && 973 !(SDHC_COMMAND_TYPE(s->cmdreg) == SDHC_CMD_ABORT))))) { 974 return false; 975 } 976 977 return true; 978 } 979 980 /* The Buffer Data Port register must be accessed in sequential and 981 * continuous manner */ 982 static inline bool 983 sdhci_buff_access_is_sequential(SDHCIState *s, unsigned byte_num) 984 { 985 if ((s->data_count & 0x3) != byte_num) { 986 qemu_log_mask(LOG_GUEST_ERROR, 987 "SDHCI: Non-sequential access to Buffer Data Port" 988 " register is prohibited\n"); 989 return false; 990 } 991 return true; 992 } 993 994 static void sdhci_resume_pending_transfer(SDHCIState *s) 995 { 996 timer_del(s->transfer_timer); 997 sdhci_data_transfer(s); 998 } 999 1000 static uint64_t sdhci_read(void *opaque, hwaddr offset, unsigned size) 1001 { 1002 SDHCIState *s = (SDHCIState *)opaque; 1003 uint32_t ret = 0; 1004 1005 if (timer_pending(s->transfer_timer)) { 1006 sdhci_resume_pending_transfer(s); 1007 } 1008 1009 switch (offset & ~0x3) { 1010 case SDHC_SYSAD: 1011 ret = s->sdmasysad; 1012 break; 1013 case SDHC_BLKSIZE: 1014 ret = s->blksize | (s->blkcnt << 16); 1015 break; 1016 case SDHC_ARGUMENT: 1017 ret = s->argument; 1018 break; 1019 case SDHC_TRNMOD: 1020 ret = s->trnmod | (s->cmdreg << 16); 1021 break; 1022 case SDHC_RSPREG0 ... SDHC_RSPREG3: 1023 ret = s->rspreg[((offset & ~0x3) - SDHC_RSPREG0) >> 2]; 1024 break; 1025 case SDHC_BDATA: 1026 if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) { 1027 ret = sdhci_read_dataport(s, size); 1028 trace_sdhci_access("rd", size << 3, offset, "->", ret, ret); 1029 return ret; 1030 } 1031 break; 1032 case SDHC_PRNSTS: 1033 ret = s->prnsts; 1034 ret = FIELD_DP32(ret, SDHC_PRNSTS, DAT_LVL, 1035 sdbus_get_dat_lines(&s->sdbus)); 1036 ret = FIELD_DP32(ret, SDHC_PRNSTS, CMD_LVL, 1037 sdbus_get_cmd_line(&s->sdbus)); 1038 break; 1039 case SDHC_HOSTCTL: 1040 ret = s->hostctl1 | (s->pwrcon << 8) | (s->blkgap << 16) | 1041 (s->wakcon << 24); 1042 break; 1043 case SDHC_CLKCON: 1044 ret = s->clkcon | (s->timeoutcon << 16); 1045 break; 1046 case SDHC_NORINTSTS: 1047 ret = s->norintsts | (s->errintsts << 16); 1048 break; 1049 case SDHC_NORINTSTSEN: 1050 ret = s->norintstsen | (s->errintstsen << 16); 1051 break; 1052 case SDHC_NORINTSIGEN: 1053 ret = s->norintsigen | (s->errintsigen << 16); 1054 break; 1055 case SDHC_ACMD12ERRSTS: 1056 ret = s->acmd12errsts | (s->hostctl2 << 16); 1057 break; 1058 case SDHC_CAPAB: 1059 ret = (uint32_t)s->capareg; 1060 break; 1061 case SDHC_CAPAB + 4: 1062 ret = (uint32_t)(s->capareg >> 32); 1063 break; 1064 case SDHC_MAXCURR: 1065 ret = (uint32_t)s->maxcurr; 1066 break; 1067 case SDHC_MAXCURR + 4: 1068 ret = (uint32_t)(s->maxcurr >> 32); 1069 break; 1070 case SDHC_ADMAERR: 1071 ret = s->admaerr; 1072 break; 1073 case SDHC_ADMASYSADDR: 1074 ret = (uint32_t)s->admasysaddr; 1075 break; 1076 case SDHC_ADMASYSADDR + 4: 1077 ret = (uint32_t)(s->admasysaddr >> 32); 1078 break; 1079 case SDHC_SLOT_INT_STATUS: 1080 ret = (s->version << 16) | sdhci_slotint(s); 1081 break; 1082 default: 1083 qemu_log_mask(LOG_UNIMP, "SDHC rd_%ub @0x%02" HWADDR_PRIx " " 1084 "not implemented\n", size, offset); 1085 break; 1086 } 1087 1088 ret >>= (offset & 0x3) * 8; 1089 ret &= (1ULL << (size * 8)) - 1; 1090 trace_sdhci_access("rd", size << 3, offset, "->", ret, ret); 1091 return ret; 1092 } 1093 1094 static inline void sdhci_blkgap_write(SDHCIState *s, uint8_t value) 1095 { 1096 if ((value & SDHC_STOP_AT_GAP_REQ) && (s->blkgap & SDHC_STOP_AT_GAP_REQ)) { 1097 return; 1098 } 1099 s->blkgap = value & SDHC_STOP_AT_GAP_REQ; 1100 1101 if ((value & SDHC_CONTINUE_REQ) && s->stopped_state && 1102 (s->blkgap & SDHC_STOP_AT_GAP_REQ) == 0) { 1103 if (s->stopped_state == sdhc_gap_read) { 1104 s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ; 1105 sdhci_read_block_from_card(s); 1106 } else { 1107 s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_WRITE; 1108 sdhci_write_block_to_card(s); 1109 } 1110 s->stopped_state = sdhc_not_stopped; 1111 } else if (!s->stopped_state && (value & SDHC_STOP_AT_GAP_REQ)) { 1112 if (s->prnsts & SDHC_DOING_READ) { 1113 s->stopped_state = sdhc_gap_read; 1114 } else if (s->prnsts & SDHC_DOING_WRITE) { 1115 s->stopped_state = sdhc_gap_write; 1116 } 1117 } 1118 } 1119 1120 static inline void sdhci_reset_write(SDHCIState *s, uint8_t value) 1121 { 1122 switch (value) { 1123 case SDHC_RESET_ALL: 1124 sdhci_reset(s); 1125 break; 1126 case SDHC_RESET_CMD: 1127 s->prnsts &= ~SDHC_CMD_INHIBIT; 1128 s->norintsts &= ~SDHC_NIS_CMDCMP; 1129 break; 1130 case SDHC_RESET_DATA: 1131 s->data_count = 0; 1132 s->prnsts &= ~(SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE | 1133 SDHC_DOING_READ | SDHC_DOING_WRITE | 1134 SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE); 1135 s->blkgap &= ~(SDHC_STOP_AT_GAP_REQ | SDHC_CONTINUE_REQ); 1136 s->stopped_state = sdhc_not_stopped; 1137 s->norintsts &= ~(SDHC_NIS_WBUFRDY | SDHC_NIS_RBUFRDY | 1138 SDHC_NIS_DMA | SDHC_NIS_TRSCMP | SDHC_NIS_BLKGAP); 1139 break; 1140 } 1141 } 1142 1143 static void 1144 sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) 1145 { 1146 SDHCIState *s = (SDHCIState *)opaque; 1147 unsigned shift = 8 * (offset & 0x3); 1148 uint32_t mask = ~(((1ULL << (size * 8)) - 1) << shift); 1149 uint32_t value = val; 1150 value <<= shift; 1151 1152 if (timer_pending(s->transfer_timer)) { 1153 sdhci_resume_pending_transfer(s); 1154 } 1155 1156 switch (offset & ~0x3) { 1157 case SDHC_SYSAD: 1158 if (!TRANSFERRING_DATA(s->prnsts)) { 1159 s->sdmasysad = (s->sdmasysad & mask) | value; 1160 MASKED_WRITE(s->sdmasysad, mask, value); 1161 /* Writing to last byte of sdmasysad might trigger transfer */ 1162 if (!(mask & 0xFF000000) && s->blkcnt && 1163 (s->blksize & BLOCK_SIZE_MASK) && 1164 SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) { 1165 if (s->trnmod & SDHC_TRNS_MULTI) { 1166 sdhci_sdma_transfer_multi_blocks(s); 1167 } else { 1168 sdhci_sdma_transfer_single_block(s); 1169 } 1170 } 1171 } 1172 break; 1173 case SDHC_BLKSIZE: 1174 if (!TRANSFERRING_DATA(s->prnsts)) { 1175 uint16_t blksize = s->blksize; 1176 1177 /* 1178 * [14:12] SDMA Buffer Boundary 1179 * [11:00] Transfer Block Size 1180 */ 1181 MASKED_WRITE(s->blksize, mask, extract32(value, 0, 15)); 1182 MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16); 1183 1184 /* Limit block size to the maximum buffer size */ 1185 if (extract32(s->blksize, 0, 12) > s->buf_maxsz) { 1186 qemu_log_mask(LOG_GUEST_ERROR, "%s: Size 0x%x is larger than " 1187 "the maximum buffer 0x%x\n", __func__, s->blksize, 1188 s->buf_maxsz); 1189 1190 s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz); 1191 } 1192 1193 /* 1194 * If the block size is programmed to a different value from 1195 * the previous one, reset the data pointer of s->fifo_buffer[] 1196 * so that s->fifo_buffer[] can be filled in using the new block 1197 * size in the next transfer. 1198 */ 1199 if (blksize != s->blksize) { 1200 s->data_count = 0; 1201 } 1202 } 1203 1204 break; 1205 case SDHC_ARGUMENT: 1206 MASKED_WRITE(s->argument, mask, value); 1207 break; 1208 case SDHC_TRNMOD: 1209 /* DMA can be enabled only if it is supported as indicated by 1210 * capabilities register */ 1211 if (!(s->capareg & R_SDHC_CAPAB_SDMA_MASK)) { 1212 value &= ~SDHC_TRNS_DMA; 1213 } 1214 1215 /* TRNMOD writes are inhibited while Command Inhibit (DAT) is true */ 1216 if (s->prnsts & SDHC_DATA_INHIBIT) { 1217 mask |= 0xffff; 1218 } 1219 1220 MASKED_WRITE(s->trnmod, mask, value & SDHC_TRNMOD_MASK); 1221 MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16); 1222 1223 /* Writing to the upper byte of CMDREG triggers SD command generation */ 1224 if ((mask & 0xFF000000) || !sdhci_can_issue_command(s)) { 1225 break; 1226 } 1227 1228 sdhci_send_command(s); 1229 break; 1230 case SDHC_BDATA: 1231 if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) { 1232 sdhci_write_dataport(s, value >> shift, size); 1233 } 1234 break; 1235 case SDHC_HOSTCTL: 1236 if (!(mask & 0xFF0000)) { 1237 sdhci_blkgap_write(s, value >> 16); 1238 } 1239 MASKED_WRITE(s->hostctl1, mask, value); 1240 MASKED_WRITE(s->pwrcon, mask >> 8, value >> 8); 1241 MASKED_WRITE(s->wakcon, mask >> 24, value >> 24); 1242 if (!(s->prnsts & SDHC_CARD_PRESENT) || ((s->pwrcon >> 1) & 0x7) < 5 || 1243 !(s->capareg & (1 << (31 - ((s->pwrcon >> 1) & 0x7))))) { 1244 s->pwrcon &= ~SDHC_POWER_ON; 1245 } 1246 break; 1247 case SDHC_CLKCON: 1248 if (!(mask & 0xFF000000)) { 1249 sdhci_reset_write(s, value >> 24); 1250 } 1251 MASKED_WRITE(s->clkcon, mask, value); 1252 MASKED_WRITE(s->timeoutcon, mask >> 16, value >> 16); 1253 if (s->clkcon & SDHC_CLOCK_INT_EN) { 1254 s->clkcon |= SDHC_CLOCK_INT_STABLE; 1255 } else { 1256 s->clkcon &= ~SDHC_CLOCK_INT_STABLE; 1257 } 1258 break; 1259 case SDHC_NORINTSTS: 1260 if (s->norintstsen & SDHC_NISEN_CARDINT) { 1261 value &= ~SDHC_NIS_CARDINT; 1262 } 1263 s->norintsts &= mask | ~value; 1264 s->errintsts &= (mask >> 16) | ~(value >> 16); 1265 if (s->errintsts) { 1266 s->norintsts |= SDHC_NIS_ERR; 1267 } else { 1268 s->norintsts &= ~SDHC_NIS_ERR; 1269 } 1270 sdhci_update_irq(s); 1271 break; 1272 case SDHC_NORINTSTSEN: 1273 MASKED_WRITE(s->norintstsen, mask, value); 1274 MASKED_WRITE(s->errintstsen, mask >> 16, value >> 16); 1275 s->norintsts &= s->norintstsen; 1276 s->errintsts &= s->errintstsen; 1277 if (s->errintsts) { 1278 s->norintsts |= SDHC_NIS_ERR; 1279 } else { 1280 s->norintsts &= ~SDHC_NIS_ERR; 1281 } 1282 /* Quirk for Raspberry Pi: pending card insert interrupt 1283 * appears when first enabled after power on */ 1284 if ((s->norintstsen & SDHC_NISEN_INSERT) && s->pending_insert_state) { 1285 assert(s->pending_insert_quirk); 1286 s->norintsts |= SDHC_NIS_INSERT; 1287 s->pending_insert_state = false; 1288 } 1289 sdhci_update_irq(s); 1290 break; 1291 case SDHC_NORINTSIGEN: 1292 MASKED_WRITE(s->norintsigen, mask, value); 1293 MASKED_WRITE(s->errintsigen, mask >> 16, value >> 16); 1294 sdhci_update_irq(s); 1295 break; 1296 case SDHC_ADMAERR: 1297 MASKED_WRITE(s->admaerr, mask, value); 1298 break; 1299 case SDHC_ADMASYSADDR: 1300 s->admasysaddr = (s->admasysaddr & (0xFFFFFFFF00000000ULL | 1301 (uint64_t)mask)) | (uint64_t)value; 1302 break; 1303 case SDHC_ADMASYSADDR + 4: 1304 s->admasysaddr = (s->admasysaddr & (0x00000000FFFFFFFFULL | 1305 ((uint64_t)mask << 32))) | ((uint64_t)value << 32); 1306 break; 1307 case SDHC_FEAER: 1308 s->acmd12errsts |= value; 1309 s->errintsts |= (value >> 16) & s->errintstsen; 1310 if (s->acmd12errsts) { 1311 s->errintsts |= SDHC_EIS_CMD12ERR; 1312 } 1313 if (s->errintsts) { 1314 s->norintsts |= SDHC_NIS_ERR; 1315 } 1316 sdhci_update_irq(s); 1317 break; 1318 case SDHC_ACMD12ERRSTS: 1319 MASKED_WRITE(s->acmd12errsts, mask, value & UINT16_MAX); 1320 if (s->uhs_mode >= UHS_I) { 1321 MASKED_WRITE(s->hostctl2, mask >> 16, value >> 16); 1322 1323 if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, V18_ENA)) { 1324 sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_1_8V); 1325 } else { 1326 sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_3_3V); 1327 } 1328 } 1329 break; 1330 1331 case SDHC_CAPAB: 1332 case SDHC_CAPAB + 4: 1333 case SDHC_MAXCURR: 1334 case SDHC_MAXCURR + 4: 1335 qemu_log_mask(LOG_GUEST_ERROR, "SDHC wr_%ub @0x%02" HWADDR_PRIx 1336 " <- 0x%08x read-only\n", size, offset, value >> shift); 1337 break; 1338 1339 default: 1340 qemu_log_mask(LOG_UNIMP, "SDHC wr_%ub @0x%02" HWADDR_PRIx " <- 0x%08x " 1341 "not implemented\n", size, offset, value >> shift); 1342 break; 1343 } 1344 trace_sdhci_access("wr", size << 3, offset, "<-", 1345 value >> shift, value >> shift); 1346 } 1347 1348 static const MemoryRegionOps sdhci_mmio_le_ops = { 1349 .read = sdhci_read, 1350 .write = sdhci_write, 1351 .valid = { 1352 .min_access_size = 1, 1353 .max_access_size = 4, 1354 .unaligned = false 1355 }, 1356 .endianness = DEVICE_LITTLE_ENDIAN, 1357 }; 1358 1359 static const MemoryRegionOps sdhci_mmio_be_ops = { 1360 .read = sdhci_read, 1361 .write = sdhci_write, 1362 .impl = { 1363 .min_access_size = 4, 1364 .max_access_size = 4, 1365 }, 1366 .valid = { 1367 .min_access_size = 1, 1368 .max_access_size = 4, 1369 .unaligned = false 1370 }, 1371 .endianness = DEVICE_BIG_ENDIAN, 1372 }; 1373 1374 static void sdhci_init_readonly_registers(SDHCIState *s, Error **errp) 1375 { 1376 ERRP_GUARD(); 1377 1378 switch (s->sd_spec_version) { 1379 case 2 ... 3: 1380 break; 1381 default: 1382 error_setg(errp, "Only Spec v2/v3 are supported"); 1383 return; 1384 } 1385 s->version = (SDHC_HCVER_VENDOR << 8) | (s->sd_spec_version - 1); 1386 1387 sdhci_check_capareg(s, errp); 1388 if (*errp) { 1389 return; 1390 } 1391 } 1392 1393 /* --- qdev common --- */ 1394 1395 void sdhci_initfn(SDHCIState *s) 1396 { 1397 qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SDHCI_BUS, DEVICE(s), "sd-bus"); 1398 1399 s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s); 1400 s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_data_transfer, s); 1401 1402 s->io_ops = &sdhci_mmio_le_ops; 1403 } 1404 1405 void sdhci_uninitfn(SDHCIState *s) 1406 { 1407 timer_free(s->insert_timer); 1408 timer_free(s->transfer_timer); 1409 1410 g_free(s->fifo_buffer); 1411 s->fifo_buffer = NULL; 1412 } 1413 1414 void sdhci_common_realize(SDHCIState *s, Error **errp) 1415 { 1416 ERRP_GUARD(); 1417 1418 switch (s->endianness) { 1419 case DEVICE_LITTLE_ENDIAN: 1420 /* s->io_ops is little endian by default */ 1421 break; 1422 case DEVICE_BIG_ENDIAN: 1423 if (s->io_ops != &sdhci_mmio_le_ops) { 1424 error_setg(errp, "SD controller doesn't support big endianness"); 1425 return; 1426 } 1427 s->io_ops = &sdhci_mmio_be_ops; 1428 break; 1429 default: 1430 error_setg(errp, "Incorrect endianness"); 1431 return; 1432 } 1433 1434 sdhci_init_readonly_registers(s, errp); 1435 if (*errp) { 1436 return; 1437 } 1438 1439 s->buf_maxsz = sdhci_get_fifolen(s); 1440 s->fifo_buffer = g_malloc0(s->buf_maxsz); 1441 1442 memory_region_init_io(&s->iomem, OBJECT(s), s->io_ops, s, "sdhci", 1443 SDHC_REGISTERS_MAP_SIZE); 1444 } 1445 1446 void sdhci_common_unrealize(SDHCIState *s) 1447 { 1448 /* This function is expected to be called only once for each class: 1449 * - SysBus: via DeviceClass->unrealize(), 1450 * - PCI: via PCIDeviceClass->exit(). 1451 * However to avoid double-free and/or use-after-free we still nullify 1452 * this variable (better safe than sorry!). */ 1453 g_free(s->fifo_buffer); 1454 s->fifo_buffer = NULL; 1455 } 1456 1457 static bool sdhci_pending_insert_vmstate_needed(void *opaque) 1458 { 1459 SDHCIState *s = opaque; 1460 1461 return s->pending_insert_state; 1462 } 1463 1464 static const VMStateDescription sdhci_pending_insert_vmstate = { 1465 .name = "sdhci/pending-insert", 1466 .version_id = 1, 1467 .minimum_version_id = 1, 1468 .needed = sdhci_pending_insert_vmstate_needed, 1469 .fields = (const VMStateField[]) { 1470 VMSTATE_BOOL(pending_insert_state, SDHCIState), 1471 VMSTATE_END_OF_LIST() 1472 }, 1473 }; 1474 1475 const VMStateDescription sdhci_vmstate = { 1476 .name = "sdhci", 1477 .version_id = 1, 1478 .minimum_version_id = 1, 1479 .fields = (const VMStateField[]) { 1480 VMSTATE_UINT32(sdmasysad, SDHCIState), 1481 VMSTATE_UINT16(blksize, SDHCIState), 1482 VMSTATE_UINT16(blkcnt, SDHCIState), 1483 VMSTATE_UINT32(argument, SDHCIState), 1484 VMSTATE_UINT16(trnmod, SDHCIState), 1485 VMSTATE_UINT16(cmdreg, SDHCIState), 1486 VMSTATE_UINT32_ARRAY(rspreg, SDHCIState, 4), 1487 VMSTATE_UINT32(prnsts, SDHCIState), 1488 VMSTATE_UINT8(hostctl1, SDHCIState), 1489 VMSTATE_UINT8(pwrcon, SDHCIState), 1490 VMSTATE_UINT8(blkgap, SDHCIState), 1491 VMSTATE_UINT8(wakcon, SDHCIState), 1492 VMSTATE_UINT16(clkcon, SDHCIState), 1493 VMSTATE_UINT8(timeoutcon, SDHCIState), 1494 VMSTATE_UINT8(admaerr, SDHCIState), 1495 VMSTATE_UINT16(norintsts, SDHCIState), 1496 VMSTATE_UINT16(errintsts, SDHCIState), 1497 VMSTATE_UINT16(norintstsen, SDHCIState), 1498 VMSTATE_UINT16(errintstsen, SDHCIState), 1499 VMSTATE_UINT16(norintsigen, SDHCIState), 1500 VMSTATE_UINT16(errintsigen, SDHCIState), 1501 VMSTATE_UINT16(acmd12errsts, SDHCIState), 1502 VMSTATE_UINT16(data_count, SDHCIState), 1503 VMSTATE_UINT64(admasysaddr, SDHCIState), 1504 VMSTATE_UINT8(stopped_state, SDHCIState), 1505 VMSTATE_VBUFFER_UINT32(fifo_buffer, SDHCIState, 1, NULL, buf_maxsz), 1506 VMSTATE_TIMER_PTR(insert_timer, SDHCIState), 1507 VMSTATE_TIMER_PTR(transfer_timer, SDHCIState), 1508 VMSTATE_END_OF_LIST() 1509 }, 1510 .subsections = (const VMStateDescription * const []) { 1511 &sdhci_pending_insert_vmstate, 1512 NULL 1513 }, 1514 }; 1515 1516 void sdhci_common_class_init(ObjectClass *klass, void *data) 1517 { 1518 DeviceClass *dc = DEVICE_CLASS(klass); 1519 1520 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); 1521 dc->vmsd = &sdhci_vmstate; 1522 device_class_set_legacy_reset(dc, sdhci_poweron_reset); 1523 } 1524 1525 /* --- qdev SysBus --- */ 1526 1527 static Property sdhci_sysbus_properties[] = { 1528 DEFINE_SDHCI_COMMON_PROPERTIES(SDHCIState), 1529 DEFINE_PROP_BOOL("pending-insert-quirk", SDHCIState, pending_insert_quirk, 1530 false), 1531 DEFINE_PROP_LINK("dma", SDHCIState, 1532 dma_mr, TYPE_MEMORY_REGION, MemoryRegion *), 1533 DEFINE_PROP_END_OF_LIST(), 1534 }; 1535 1536 static void sdhci_sysbus_init(Object *obj) 1537 { 1538 SDHCIState *s = SYSBUS_SDHCI(obj); 1539 1540 sdhci_initfn(s); 1541 } 1542 1543 static void sdhci_sysbus_finalize(Object *obj) 1544 { 1545 SDHCIState *s = SYSBUS_SDHCI(obj); 1546 1547 if (s->dma_mr) { 1548 object_unparent(OBJECT(s->dma_mr)); 1549 } 1550 1551 sdhci_uninitfn(s); 1552 } 1553 1554 static void sdhci_sysbus_realize(DeviceState *dev, Error **errp) 1555 { 1556 ERRP_GUARD(); 1557 SDHCIState *s = SYSBUS_SDHCI(dev); 1558 SysBusDevice *sbd = SYS_BUS_DEVICE(dev); 1559 1560 sdhci_common_realize(s, errp); 1561 if (*errp) { 1562 return; 1563 } 1564 1565 if (s->dma_mr) { 1566 s->dma_as = &s->sysbus_dma_as; 1567 address_space_init(s->dma_as, s->dma_mr, "sdhci-dma"); 1568 } else { 1569 /* use system_memory() if property "dma" not set */ 1570 s->dma_as = &address_space_memory; 1571 } 1572 1573 sysbus_init_irq(sbd, &s->irq); 1574 1575 sysbus_init_mmio(sbd, &s->iomem); 1576 } 1577 1578 static void sdhci_sysbus_unrealize(DeviceState *dev) 1579 { 1580 SDHCIState *s = SYSBUS_SDHCI(dev); 1581 1582 sdhci_common_unrealize(s); 1583 1584 if (s->dma_mr) { 1585 address_space_destroy(s->dma_as); 1586 } 1587 } 1588 1589 static void sdhci_sysbus_class_init(ObjectClass *klass, void *data) 1590 { 1591 DeviceClass *dc = DEVICE_CLASS(klass); 1592 1593 device_class_set_props(dc, sdhci_sysbus_properties); 1594 dc->realize = sdhci_sysbus_realize; 1595 dc->unrealize = sdhci_sysbus_unrealize; 1596 1597 sdhci_common_class_init(klass, data); 1598 } 1599 1600 /* --- qdev bus master --- */ 1601 1602 static void sdhci_bus_class_init(ObjectClass *klass, void *data) 1603 { 1604 SDBusClass *sbc = SD_BUS_CLASS(klass); 1605 1606 sbc->set_inserted = sdhci_set_inserted; 1607 sbc->set_readonly = sdhci_set_readonly; 1608 } 1609 1610 /* --- qdev i.MX eSDHC --- */ 1611 1612 #define USDHC_MIX_CTRL 0x48 1613 1614 #define USDHC_VENDOR_SPEC 0xc0 1615 #define USDHC_IMX_FRC_SDCLK_ON (1 << 8) 1616 1617 #define USDHC_DLL_CTRL 0x60 1618 1619 #define USDHC_TUNING_CTRL 0xcc 1620 #define USDHC_TUNE_CTRL_STATUS 0x68 1621 #define USDHC_WTMK_LVL 0x44 1622 1623 /* Undocumented register used by guests working around erratum ERR004536 */ 1624 #define USDHC_UNDOCUMENTED_REG27 0x6c 1625 1626 #define USDHC_CTRL_4BITBUS (0x1 << 1) 1627 #define USDHC_CTRL_8BITBUS (0x2 << 1) 1628 1629 #define USDHC_PRNSTS_SDSTB (1 << 3) 1630 1631 static uint64_t usdhc_read(void *opaque, hwaddr offset, unsigned size) 1632 { 1633 SDHCIState *s = SYSBUS_SDHCI(opaque); 1634 uint32_t ret; 1635 uint16_t hostctl1; 1636 1637 switch (offset) { 1638 default: 1639 return sdhci_read(opaque, offset, size); 1640 1641 case SDHC_HOSTCTL: 1642 /* 1643 * For a detailed explanation on the following bit 1644 * manipulation code see comments in a similar part of 1645 * usdhc_write() 1646 */ 1647 hostctl1 = SDHC_DMA_TYPE(s->hostctl1) << (8 - 3); 1648 1649 if (s->hostctl1 & SDHC_CTRL_8BITBUS) { 1650 hostctl1 |= USDHC_CTRL_8BITBUS; 1651 } 1652 1653 if (s->hostctl1 & SDHC_CTRL_4BITBUS) { 1654 hostctl1 |= USDHC_CTRL_4BITBUS; 1655 } 1656 1657 ret = hostctl1; 1658 ret |= (uint32_t)s->blkgap << 16; 1659 ret |= (uint32_t)s->wakcon << 24; 1660 1661 break; 1662 1663 case SDHC_PRNSTS: 1664 /* Add SDSTB (SD Clock Stable) bit to PRNSTS */ 1665 ret = sdhci_read(opaque, offset, size) & ~USDHC_PRNSTS_SDSTB; 1666 if (s->clkcon & SDHC_CLOCK_INT_STABLE) { 1667 ret |= USDHC_PRNSTS_SDSTB; 1668 } 1669 break; 1670 1671 case USDHC_VENDOR_SPEC: 1672 ret = s->vendor_spec; 1673 break; 1674 case USDHC_DLL_CTRL: 1675 case USDHC_TUNE_CTRL_STATUS: 1676 case USDHC_UNDOCUMENTED_REG27: 1677 case USDHC_TUNING_CTRL: 1678 case USDHC_MIX_CTRL: 1679 case USDHC_WTMK_LVL: 1680 ret = 0; 1681 break; 1682 } 1683 1684 return ret; 1685 } 1686 1687 static void 1688 usdhc_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) 1689 { 1690 SDHCIState *s = SYSBUS_SDHCI(opaque); 1691 uint8_t hostctl1; 1692 uint32_t value = (uint32_t)val; 1693 1694 switch (offset) { 1695 case USDHC_DLL_CTRL: 1696 case USDHC_TUNE_CTRL_STATUS: 1697 case USDHC_UNDOCUMENTED_REG27: 1698 case USDHC_TUNING_CTRL: 1699 case USDHC_WTMK_LVL: 1700 break; 1701 1702 case USDHC_VENDOR_SPEC: 1703 s->vendor_spec = value; 1704 switch (s->vendor) { 1705 case SDHCI_VENDOR_IMX: 1706 if (value & USDHC_IMX_FRC_SDCLK_ON) { 1707 s->prnsts &= ~SDHC_IMX_CLOCK_GATE_OFF; 1708 } else { 1709 s->prnsts |= SDHC_IMX_CLOCK_GATE_OFF; 1710 } 1711 break; 1712 default: 1713 break; 1714 } 1715 break; 1716 1717 case SDHC_HOSTCTL: 1718 /* 1719 * Here's What ESDHCI has at offset 0x28 (SDHC_HOSTCTL) 1720 * 1721 * 7 6 5 4 3 2 1 0 1722 * |-----------+--------+--------+-----------+----------+---------| 1723 * | Card | Card | Endian | DATA3 | Data | Led | 1724 * | Detect | Detect | Mode | as Card | Transfer | Control | 1725 * | Signal | Test | | Detection | Width | | 1726 * | Selection | Level | | Pin | | | 1727 * |-----------+--------+--------+-----------+----------+---------| 1728 * 1729 * and 0x29 1730 * 1731 * 15 10 9 8 1732 * |----------+------| 1733 * | Reserved | DMA | 1734 * | | Sel. | 1735 * | | | 1736 * |----------+------| 1737 * 1738 * and here's what SDCHI spec expects those offsets to be: 1739 * 1740 * 0x28 (Host Control Register) 1741 * 1742 * 7 6 5 4 3 2 1 0 1743 * |--------+--------+----------+------+--------+----------+---------| 1744 * | Card | Card | Extended | DMA | High | Data | LED | 1745 * | Detect | Detect | Data | Sel. | Speed | Transfer | Control | 1746 * | Signal | Test | Transfer | | Enable | Width | | 1747 * | Sel. | Level | Width | | | | | 1748 * |--------+--------+----------+------+--------+----------+---------| 1749 * 1750 * and 0x29 (Power Control Register) 1751 * 1752 * |----------------------------------| 1753 * | Power Control Register | 1754 * | | 1755 * | Description omitted, | 1756 * | since it has no analog in ESDHCI | 1757 * | | 1758 * |----------------------------------| 1759 * 1760 * Since offsets 0x2A and 0x2B should be compatible between 1761 * both IP specs we only need to reconcile least 16-bit of the 1762 * word we've been given. 1763 */ 1764 1765 /* 1766 * First, save bits 7 6 and 0 since they are identical 1767 */ 1768 hostctl1 = value & (SDHC_CTRL_LED | 1769 SDHC_CTRL_CDTEST_INS | 1770 SDHC_CTRL_CDTEST_EN); 1771 /* 1772 * Second, split "Data Transfer Width" from bits 2 and 1 in to 1773 * bits 5 and 1 1774 */ 1775 if (value & USDHC_CTRL_8BITBUS) { 1776 hostctl1 |= SDHC_CTRL_8BITBUS; 1777 } 1778 1779 if (value & USDHC_CTRL_4BITBUS) { 1780 hostctl1 |= USDHC_CTRL_4BITBUS; 1781 } 1782 1783 /* 1784 * Third, move DMA select from bits 9 and 8 to bits 4 and 3 1785 */ 1786 hostctl1 |= SDHC_DMA_TYPE(value >> (8 - 3)); 1787 1788 /* 1789 * Now place the corrected value into low 16-bit of the value 1790 * we are going to give standard SDHCI write function 1791 * 1792 * NOTE: This transformation should be the inverse of what can 1793 * be found in drivers/mmc/host/sdhci-esdhc-imx.c in Linux 1794 * kernel 1795 */ 1796 value &= ~UINT16_MAX; 1797 value |= hostctl1; 1798 value |= (uint16_t)s->pwrcon << 8; 1799 1800 sdhci_write(opaque, offset, value, size); 1801 break; 1802 1803 case USDHC_MIX_CTRL: 1804 /* 1805 * So, when SD/MMC stack in Linux tries to write to "Transfer 1806 * Mode Register", ESDHC i.MX quirk code will translate it 1807 * into a write to ESDHC_MIX_CTRL, so we do the opposite in 1808 * order to get where we started 1809 * 1810 * Note that Auto CMD23 Enable bit is located in a wrong place 1811 * on i.MX, but since it is not used by QEMU we do not care. 1812 * 1813 * We don't want to call sdhci_write(.., SDHC_TRNMOD, ...) 1814 * here because it will result in a call to 1815 * sdhci_send_command(s) which we don't want. 1816 * 1817 */ 1818 s->trnmod = value & UINT16_MAX; 1819 break; 1820 case SDHC_TRNMOD: 1821 /* 1822 * Similar to above, but this time a write to "Command 1823 * Register" will be translated into a 4-byte write to 1824 * "Transfer Mode register" where lower 16-bit of value would 1825 * be set to zero. So what we do is fill those bits with 1826 * cached value from s->trnmod and let the SDHCI 1827 * infrastructure handle the rest 1828 */ 1829 sdhci_write(opaque, offset, val | s->trnmod, size); 1830 break; 1831 case SDHC_BLKSIZE: 1832 /* 1833 * ESDHCI does not implement "Host SDMA Buffer Boundary", and 1834 * Linux driver will try to zero this field out which will 1835 * break the rest of SDHCI emulation. 1836 * 1837 * Linux defaults to maximum possible setting (512K boundary) 1838 * and it seems to be the only option that i.MX IP implements, 1839 * so we artificially set it to that value. 1840 */ 1841 val |= 0x7 << 12; 1842 /* FALLTHROUGH */ 1843 default: 1844 sdhci_write(opaque, offset, val, size); 1845 break; 1846 } 1847 } 1848 1849 static const MemoryRegionOps usdhc_mmio_ops = { 1850 .read = usdhc_read, 1851 .write = usdhc_write, 1852 .valid = { 1853 .min_access_size = 1, 1854 .max_access_size = 4, 1855 .unaligned = false 1856 }, 1857 .endianness = DEVICE_LITTLE_ENDIAN, 1858 }; 1859 1860 static void imx_usdhc_init(Object *obj) 1861 { 1862 SDHCIState *s = SYSBUS_SDHCI(obj); 1863 1864 s->io_ops = &usdhc_mmio_ops; 1865 s->quirks = SDHCI_QUIRK_NO_BUSY_IRQ; 1866 } 1867 1868 /* --- qdev Samsung s3c --- */ 1869 1870 #define S3C_SDHCI_CONTROL2 0x80 1871 #define S3C_SDHCI_CONTROL3 0x84 1872 #define S3C_SDHCI_CONTROL4 0x8c 1873 1874 static uint64_t sdhci_s3c_read(void *opaque, hwaddr offset, unsigned size) 1875 { 1876 uint64_t ret; 1877 1878 switch (offset) { 1879 case S3C_SDHCI_CONTROL2: 1880 case S3C_SDHCI_CONTROL3: 1881 case S3C_SDHCI_CONTROL4: 1882 /* ignore */ 1883 ret = 0; 1884 break; 1885 default: 1886 ret = sdhci_read(opaque, offset, size); 1887 break; 1888 } 1889 1890 return ret; 1891 } 1892 1893 static void sdhci_s3c_write(void *opaque, hwaddr offset, uint64_t val, 1894 unsigned size) 1895 { 1896 switch (offset) { 1897 case S3C_SDHCI_CONTROL2: 1898 case S3C_SDHCI_CONTROL3: 1899 case S3C_SDHCI_CONTROL4: 1900 /* ignore */ 1901 break; 1902 default: 1903 sdhci_write(opaque, offset, val, size); 1904 break; 1905 } 1906 } 1907 1908 static const MemoryRegionOps sdhci_s3c_mmio_ops = { 1909 .read = sdhci_s3c_read, 1910 .write = sdhci_s3c_write, 1911 .valid = { 1912 .min_access_size = 1, 1913 .max_access_size = 4, 1914 .unaligned = false 1915 }, 1916 .endianness = DEVICE_LITTLE_ENDIAN, 1917 }; 1918 1919 static void sdhci_s3c_init(Object *obj) 1920 { 1921 SDHCIState *s = SYSBUS_SDHCI(obj); 1922 1923 s->io_ops = &sdhci_s3c_mmio_ops; 1924 } 1925 1926 static const TypeInfo sdhci_types[] = { 1927 { 1928 .name = TYPE_SDHCI_BUS, 1929 .parent = TYPE_SD_BUS, 1930 .instance_size = sizeof(SDBus), 1931 .class_init = sdhci_bus_class_init, 1932 }, 1933 { 1934 .name = TYPE_SYSBUS_SDHCI, 1935 .parent = TYPE_SYS_BUS_DEVICE, 1936 .instance_size = sizeof(SDHCIState), 1937 .instance_init = sdhci_sysbus_init, 1938 .instance_finalize = sdhci_sysbus_finalize, 1939 .class_init = sdhci_sysbus_class_init, 1940 }, 1941 { 1942 .name = TYPE_IMX_USDHC, 1943 .parent = TYPE_SYSBUS_SDHCI, 1944 .instance_init = imx_usdhc_init, 1945 }, 1946 { 1947 .name = TYPE_S3C_SDHCI, 1948 .parent = TYPE_SYSBUS_SDHCI, 1949 .instance_init = sdhci_s3c_init, 1950 }, 1951 }; 1952 1953 DEFINE_TYPES(sdhci_types) 1954