/*
 * QEMU model of the Ibex SPI Controller
 * SPEC Reference: https://docs.opentitan.org/hw/ip/spi_host/doc/
 *
 * Copyright (C) 2022 Western Digital
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/registerfields.h"
#include "hw/ssi/ibex_spi_host.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/vmstate.h"
#include "trace.h"

REG32(INTR_STATE, 0x00)
    FIELD(INTR_STATE, ERROR, 0, 1)
    FIELD(INTR_STATE, SPI_EVENT, 1, 1)
REG32(INTR_ENABLE, 0x04)
    FIELD(INTR_ENABLE, ERROR, 0, 1)
    FIELD(INTR_ENABLE, SPI_EVENT, 1, 1)
REG32(INTR_TEST, 0x08)
    FIELD(INTR_TEST, ERROR, 0, 1)
    FIELD(INTR_TEST, SPI_EVENT, 1, 1)
REG32(ALERT_TEST, 0x0c)
    FIELD(ALERT_TEST, FATAL_FAULT, 0, 1)
REG32(CONTROL, 0x10)
    FIELD(CONTROL, RX_WATERMARK, 0, 8)
    FIELD(CONTROL, TX_WATERMARK, 8, 8)
    FIELD(CONTROL, OUTPUT_EN, 29, 1)
    FIELD(CONTROL, SW_RST, 30, 1)
    FIELD(CONTROL, SPIEN, 31, 1)
REG32(STATUS, 0x14)
    FIELD(STATUS, TXQD, 0, 8)
    FIELD(STATUS, RXQD, 8, 8)
    FIELD(STATUS, CMDQD, 16, 4)
    FIELD(STATUS, RXWM, 20, 1)
    FIELD(STATUS, BYTEORDER, 22, 1)
    FIELD(STATUS, RXSTALL, 23, 1)
    FIELD(STATUS, RXEMPTY, 24, 1)
    FIELD(STATUS, RXFULL, 25, 1)
    FIELD(STATUS, TXWM, 26, 1)
    FIELD(STATUS, TXSTALL, 27, 1)
    FIELD(STATUS, TXEMPTY, 28, 1)
    FIELD(STATUS, TXFULL, 29, 1)
    FIELD(STATUS, ACTIVE, 30, 1)
    FIELD(STATUS, READY, 31, 1)
REG32(CONFIGOPTS, 0x18)
    FIELD(CONFIGOPTS, CLKDIV_0, 0, 16)
    FIELD(CONFIGOPTS, CSNIDLE_0, 16, 4)
    FIELD(CONFIGOPTS, CSNTRAIL_0, 20, 4)
    FIELD(CONFIGOPTS, CSNLEAD_0, 24, 4)
    FIELD(CONFIGOPTS, FULLCYC_0, 29, 1)
    FIELD(CONFIGOPTS, CPHA_0, 30, 1)
    FIELD(CONFIGOPTS, CPOL_0, 31, 1)
REG32(CSID, 0x1c)
    FIELD(CSID, CSID, 0, 32)
REG32(COMMAND, 0x20)
    FIELD(COMMAND, LEN, 0, 8)
    FIELD(COMMAND, CSAAT, 9, 1)
    FIELD(COMMAND, SPEED, 10, 2)
    FIELD(COMMAND, DIRECTION, 12, 2)
REG32(ERROR_ENABLE, 0x2c)
    FIELD(ERROR_ENABLE, CMDBUSY, 0, 1)
    FIELD(ERROR_ENABLE, OVERFLOW, 1, 1)
    FIELD(ERROR_ENABLE, UNDERFLOW, 2, 1)
    FIELD(ERROR_ENABLE, CMDINVAL, 3, 1)
    FIELD(ERROR_ENABLE, CSIDINVAL, 4, 1)
REG32(ERROR_STATUS, 0x30)
    FIELD(ERROR_STATUS, CMDBUSY, 0, 1)
    FIELD(ERROR_STATUS, OVERFLOW, 1, 1)
    FIELD(ERROR_STATUS, UNDERFLOW, 2, 1)
    FIELD(ERROR_STATUS, CMDINVAL, 3, 1)
    FIELD(ERROR_STATUS, CSIDINVAL, 4, 1)
    FIELD(ERROR_STATUS, ACCESSINVAL, 5, 1)
REG32(EVENT_ENABLE, 0x34)
    FIELD(EVENT_ENABLE, RXFULL, 0, 1)
    FIELD(EVENT_ENABLE, TXEMPTY, 1, 1)
    FIELD(EVENT_ENABLE, RXWM, 2, 1)
    FIELD(EVENT_ENABLE, TXWM, 3, 1)
    FIELD(EVENT_ENABLE, READY, 4, 1)
    FIELD(EVENT_ENABLE, IDLE, 5, 1)

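/*
 * STATUS.TXQD and STATUS.RXQD report FIFO occupancy in units of 32-bit
 * words rather than bytes (per the OpenTitan spi_host spec referenced in
 * the header); this helper rounds a byte count up to whole words.
 */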
static inline uint8_t div4_round_up(uint8_t dividend)
{
    return (dividend + 3) / 4;
}

static void ibex_spi_rxfifo_reset(IbexSPIHostState *s)
{
    uint32_t data = s->regs[IBEX_SPI_HOST_STATUS];
    /* Empty the RX FIFO and assert RXEMPTY */
    fifo8_reset(&s->rx_fifo);
    data = FIELD_DP32(data, STATUS, RXFULL, 0);
    data = FIELD_DP32(data, STATUS, RXEMPTY, 1);
    s->regs[IBEX_SPI_HOST_STATUS] = data;
}

static void ibex_spi_txfifo_reset(IbexSPIHostState *s)
{
    uint32_t data = s->regs[IBEX_SPI_HOST_STATUS];
    /* Empty the TX FIFO and assert TXEMPTY */
    fifo8_reset(&s->tx_fifo);
    data = FIELD_DP32(data, STATUS, TXFULL, 0);
    data = FIELD_DP32(data, STATUS, TXEMPTY, 1);
    s->regs[IBEX_SPI_HOST_STATUS] = data;
}

static void ibex_spi_host_reset(DeviceState *dev)
{
    IbexSPIHostState *s = IBEX_SPI_HOST(dev);
    trace_ibex_spi_host_reset("Resetting Ibex SPI");

    /* SPI Host Register Reset */
    s->regs[IBEX_SPI_HOST_INTR_STATE] = 0x00;
    s->regs[IBEX_SPI_HOST_INTR_ENABLE] = 0x00;
    s->regs[IBEX_SPI_HOST_INTR_TEST] = 0x00;
    s->regs[IBEX_SPI_HOST_ALERT_TEST] = 0x00;
    s->regs[IBEX_SPI_HOST_CONTROL] = 0x7f;
    s->regs[IBEX_SPI_HOST_STATUS] = 0x00;
    s->regs[IBEX_SPI_HOST_CONFIGOPTS] = 0x00;
    s->regs[IBEX_SPI_HOST_CSID] = 0x00;
    s->regs[IBEX_SPI_HOST_COMMAND] = 0x00;
    /* RX/TX Modelled by FIFO */
    s->regs[IBEX_SPI_HOST_RXDATA] = 0x00;
    s->regs[IBEX_SPI_HOST_TXDATA] = 0x00;

    s->regs[IBEX_SPI_HOST_ERROR_ENABLE] = 0x1F;
    s->regs[IBEX_SPI_HOST_ERROR_STATUS] = 0x00;
    s->regs[IBEX_SPI_HOST_EVENT_ENABLE] = 0x00;

    ibex_spi_rxfifo_reset(s);
    ibex_spi_txfifo_reset(s);

    s->init_status = true;
}

/*
 * Check if we need to trigger an interrupt.
 * The two interrupt lines (host_err and event) can
 * be enabled separately in 'IBEX_SPI_HOST_INTR_ENABLE'.
 *
 * Interrupts are only triggered for the sources enabled in
 * 'IBEX_SPI_HOST_EVENT_ENABLE' and 'IBEX_SPI_HOST_ERROR_ENABLE'.
 */
static void ibex_spi_host_irq(IbexSPIHostState *s)
{
    uint32_t intr_test_reg = s->regs[IBEX_SPI_HOST_INTR_TEST];
    uint32_t intr_en_reg = s->regs[IBEX_SPI_HOST_INTR_ENABLE];
    uint32_t intr_state_reg = s->regs[IBEX_SPI_HOST_INTR_STATE];

    uint32_t err_en_reg = s->regs[IBEX_SPI_HOST_ERROR_ENABLE];
    uint32_t event_en_reg = s->regs[IBEX_SPI_HOST_EVENT_ENABLE];
    uint32_t err_status_reg = s->regs[IBEX_SPI_HOST_ERROR_STATUS];
    uint32_t status_reg = s->regs[IBEX_SPI_HOST_STATUS];

    bool error_en = FIELD_EX32(intr_en_reg, INTR_ENABLE, ERROR);
    bool event_en = FIELD_EX32(intr_en_reg, INTR_ENABLE, SPI_EVENT);
    bool err_pending = FIELD_EX32(intr_state_reg, INTR_STATE, ERROR);
    bool status_pending = FIELD_EX32(intr_state_reg, INTR_STATE, SPI_EVENT);

    int err_irq = 0, event_irq = 0;

    /* Error IRQ enabled and Error IRQ Cleared */
    if (error_en && !err_pending) {
        if (FIELD_EX32(intr_test_reg, INTR_TEST, ERROR)) {
            /* Error interrupt forced via INTR_TEST */
            err_irq = 1;
        } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE, CMDBUSY) &&
                   FIELD_EX32(err_status_reg, ERROR_STATUS, CMDBUSY)) {
            /* Wrote to COMMAND when not READY */
            err_irq = 1;
        } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE, CMDINVAL) &&
                   FIELD_EX32(err_status_reg, ERROR_STATUS, CMDINVAL)) {
            /* Invalid command segment */
            err_irq = 1;
        } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE, CSIDINVAL) &&
                   FIELD_EX32(err_status_reg, ERROR_STATUS, CSIDINVAL)) {
            /* Invalid value for CSID */
            err_irq = 1;
        }
        if (err_irq) {
            s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_ERROR_MASK;
        }
        qemu_set_irq(s->host_err, err_irq);
    }

    /* Event IRQ Enabled and Event IRQ Cleared */
    if (event_en && !status_pending) {
        if (FIELD_EX32(intr_test_reg, INTR_TEST, SPI_EVENT)) {
            /* Event interrupt forced via INTR_TEST */
            event_irq = 1;
        } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE, READY) &&
                   FIELD_EX32(status_reg, STATUS, READY)) {
            /* SPI Host ready for next command */
            event_irq = 1;
        } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE, TXEMPTY) &&
                   FIELD_EX32(status_reg, STATUS, TXEMPTY)) {
            /* SPI TXEMPTY, TXFIFO drained */
            event_irq = 1;
        } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE, RXFULL) &&
                   FIELD_EX32(status_reg, STATUS, RXFULL)) {
            /* SPI RXFULL, RXFIFO full */
            event_irq = 1;
        }
        if (event_irq) {
            s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_SPI_EVENT_MASK;
        }
        qemu_set_irq(s->event, event_irq);
    }
}

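/*
 * Run one command segment: bytes are popped from the TX FIFO, shifted
 * out on the SSI bus with ssi_transfer(), and the byte clocked back in
 * is pushed onto the RX FIFO. The loop stalls (TXSTALL/RXSTALL) if the
 * TX FIFO drains or the RX FIFO fills before the segment completes.
 */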
static void ibex_spi_host_transfer(IbexSPIHostState *s)
{
    uint32_t rx, tx, data;
    /* Get num of one byte transfers */
    uint8_t segment_len = FIELD_EX32(s->regs[IBEX_SPI_HOST_COMMAND],
                                     COMMAND, LEN);

    while (segment_len > 0) {
        if (fifo8_is_empty(&s->tx_fifo)) {
            /* Assert Stall */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXSTALL_MASK;
            break;
        } else if (fifo8_is_full(&s->rx_fifo)) {
            /* Assert Stall */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXSTALL_MASK;
            break;
        } else {
            tx = fifo8_pop(&s->tx_fifo);
        }

        rx = ssi_transfer(s->ssi, tx);

        trace_ibex_spi_host_transfer(tx, rx);

        if (!fifo8_is_full(&s->rx_fifo)) {
            fifo8_push(&s->rx_fifo, rx);
        } else {
            /* Assert RXFULL */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXFULL_MASK;
        }
        --segment_len;
    }

    data = s->regs[IBEX_SPI_HOST_STATUS];
    /* Assert Ready */
    data = FIELD_DP32(data, STATUS, READY, 1);
    /* Set RXQD */
    data = FIELD_DP32(data, STATUS, RXQD, div4_round_up(segment_len));
    /* Set TXQD */
    data = FIELD_DP32(data, STATUS, TXQD, fifo8_num_used(&s->tx_fifo) / 4);
    /* Clear TXFULL */
    data = FIELD_DP32(data, STATUS, TXFULL, 0);
    /* Reset RXEMPTY */
    data = FIELD_DP32(data, STATUS, RXEMPTY, 0);
    /* Update register status */
    s->regs[IBEX_SPI_HOST_STATUS] = data;
    /* Drop remaining bytes that exceed segment_len */
    ibex_spi_txfifo_reset(s);

    ibex_spi_host_irq(s);
}

static uint64_t ibex_spi_host_read(void *opaque, hwaddr addr,
                                   unsigned int size)
{
    IbexSPIHostState *s = opaque;
    uint32_t rc = 0;
    uint8_t rx_byte = 0;

    trace_ibex_spi_host_read(addr, size);

    /* Match reg index */
    addr = addr >> 2;
    switch (addr) {
    /* Skipping any W/O registers */
    case IBEX_SPI_HOST_INTR_STATE ... IBEX_SPI_HOST_INTR_ENABLE:
    case IBEX_SPI_HOST_CONTROL ... IBEX_SPI_HOST_STATUS:
        rc = s->regs[addr];
        break;
    case IBEX_SPI_HOST_CSID:
        rc = s->regs[addr];
        break;
    case IBEX_SPI_HOST_CONFIGOPTS:
        rc = s->config_opts[s->regs[IBEX_SPI_HOST_CSID]];
        break;
    case IBEX_SPI_HOST_TXDATA:
        rc = s->regs[addr];
        break;
    case IBEX_SPI_HOST_RXDATA:
        /* Clear RXFULL */
        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXFULL_MASK;

        for (int i = 0; i < 4; ++i) {
            if (fifo8_is_empty(&s->rx_fifo)) {
                /* Assert RXEMPTY, no IRQ */
                s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXEMPTY_MASK;
                s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
                    R_ERROR_STATUS_UNDERFLOW_MASK;
                return rc;
            }
            rx_byte = fifo8_pop(&s->rx_fifo);
            rc |= rx_byte << (i * 8);
        }
        break;
    case IBEX_SPI_HOST_ERROR_ENABLE ... IBEX_SPI_HOST_EVENT_ENABLE:
        rc = s->regs[addr];
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
                      addr << 2);
    }
    return rc;
}

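/*
 * MMIO write dispatcher. Note that a write to COMMAND does not run the
 * transfer inline; it arms fifo_trigger_handle, and the transfer is
 * performed by fifo_trigger_update() after TX_INTERRUPT_TRIGGER_DELAY_NS.
 */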
static void ibex_spi_host_write(void *opaque, hwaddr addr,
                                uint64_t val64, unsigned int size)
{
    IbexSPIHostState *s = opaque;
    uint32_t val32 = val64;
    uint32_t shift_mask = 0xff, status = 0, data = 0;
    uint8_t txqd_len;

    trace_ibex_spi_host_write(addr, size, val64);

    /* Match reg index */
    addr = addr >> 2;

    switch (addr) {
    /* Skipping any R/O registers */
    case IBEX_SPI_HOST_INTR_STATE:
        /* rw1c status register: writing 1 clears the corresponding bit */
        data = s->regs[addr];
        if (FIELD_EX32(val32, INTR_STATE, ERROR)) {
            data = FIELD_DP32(data, INTR_STATE, ERROR, 0);
        }
        if (FIELD_EX32(val32, INTR_STATE, SPI_EVENT)) {
            data = FIELD_DP32(data, INTR_STATE, SPI_EVENT, 0);
        }
        s->regs[addr] = data;
        break;
    case IBEX_SPI_HOST_INTR_ENABLE:
        s->regs[addr] = val32;
        break;
    case IBEX_SPI_HOST_INTR_TEST:
        s->regs[addr] = val32;
        ibex_spi_host_irq(s);
        break;
    case IBEX_SPI_HOST_ALERT_TEST:
        s->regs[addr] = val32;
        qemu_log_mask(LOG_UNIMP,
                      "%s: SPI_ALERT_TEST is not supported\n", __func__);
        break;
    case IBEX_SPI_HOST_CONTROL:
        s->regs[addr] = val32;

        if (val32 & R_CONTROL_SW_RST_MASK) {
            ibex_spi_host_reset((DeviceState *)s);
            /* Clear active if any */
            s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_ACTIVE_MASK;
        }

        if (val32 & R_CONTROL_OUTPUT_EN_MASK) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: CONTROL_OUTPUT_EN is not supported\n", __func__);
        }
        break;
    case IBEX_SPI_HOST_CONFIGOPTS:
        /* Update the CONFIGOPTS multi-register entry selected by CSID */
        s->config_opts[s->regs[IBEX_SPI_HOST_CSID]] = val32;
        qemu_log_mask(LOG_UNIMP,
                      "%s: CONFIGOPTS Hardware settings not supported\n",
                      __func__);
        break;
    case IBEX_SPI_HOST_CSID:
        if (val32 >= s->num_cs) {
            /* CSID exceeds max num_cs */
            s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
                R_ERROR_STATUS_CSIDINVAL_MASK;
            ibex_spi_host_irq(s);
            return;
        }
        s->regs[addr] = val32;
        break;
    case IBEX_SPI_HOST_COMMAND:
        s->regs[addr] = val32;

        /* STALL, IP not enabled */
        if (!(FIELD_EX32(s->regs[IBEX_SPI_HOST_CONTROL],
                         CONTROL, SPIEN))) {
            return;
        }

        /* SPI not ready, IRQ Error */
        if (!(FIELD_EX32(s->regs[IBEX_SPI_HOST_STATUS],
                         STATUS, READY))) {
            s->regs[IBEX_SPI_HOST_ERROR_STATUS] |= R_ERROR_STATUS_CMDBUSY_MASK;
            ibex_spi_host_irq(s);
            return;
        }

        /* Assert Not Ready */
        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_READY_MASK;

        if (FIELD_EX32(val32, COMMAND, DIRECTION) != BIDIRECTIONAL_TRANSFER) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Rx Only/Tx Only are not supported\n", __func__);
        }

        if (val32 & R_COMMAND_CSAAT_MASK) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: CSAAT is not supported\n", __func__);
        }
        if (val32 & R_COMMAND_SPEED_MASK) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: SPEED is not supported\n", __func__);
        }

        /* Set Transfer Callback */
        timer_mod(s->fifo_trigger_handle,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  (TX_INTERRUPT_TRIGGER_DELAY_NS));

        break;
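    /*
     * Each 32-bit TXDATA write queues up to four bytes, LSB first while
     * STATUS.BYTEORDER is 0, and TXQD is then bumped by one word.
     */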
    case IBEX_SPI_HOST_TXDATA:
        /*
         * This is a hardware `feature` where
         * the first word written to TXDATA after init is omitted entirely
         */
        if (s->init_status) {
            s->init_status = false;
            return;
        }

        for (int i = 0; i < 4; ++i) {
            /* Attempting to write when TXFULL */
            if (fifo8_is_full(&s->tx_fifo)) {
                /* Assert TXFULL, no IRQ */
                s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXFULL_MASK;
                s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
                    R_ERROR_STATUS_OVERFLOW_MASK;
                ibex_spi_host_irq(s);
                return;
            }
            /* Byte ordering is set by the IP */
            status = s->regs[IBEX_SPI_HOST_STATUS];
            if (FIELD_EX32(status, STATUS, BYTEORDER) == 0) {
                /* LE: LSB transmitted first (default for ibex processor) */
                shift_mask = 0xff << (i * 8);
            } else {
                /* BE: MSB transmitted first */
                qemu_log_mask(LOG_UNIMP,
                              "%s: Big endian is not supported\n", __func__);
            }

            fifo8_push(&s->tx_fifo, (val32 & shift_mask) >> (i * 8));
        }
        status = s->regs[IBEX_SPI_HOST_STATUS];
        /* Reset TXEMPTY */
        status = FIELD_DP32(status, STATUS, TXEMPTY, 0);
        /* Update TXQD */
        txqd_len = FIELD_EX32(status, STATUS, TXQD);
        /* TXQD counts 32-bit words; partial writes (size < 4) are padded */
        txqd_len += 1;
        status = FIELD_DP32(status, STATUS, TXQD, txqd_len);
        /* Assert Ready */
        status = FIELD_DP32(status, STATUS, READY, 1);
        /* Update register status */
        s->regs[IBEX_SPI_HOST_STATUS] = status;
        break;
    case IBEX_SPI_HOST_ERROR_ENABLE:
        s->regs[addr] = val32;

        if (val32 & R_ERROR_ENABLE_CMDINVAL_MASK) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Segment Length is not supported\n", __func__);
        }
        break;
    case IBEX_SPI_HOST_ERROR_STATUS:
        /*
         * Indicates any errors that have occurred.
         * When an error occurs, the corresponding bit must be cleared
         * here before issuing any further commands.
         */
        status = s->regs[addr];
        /* rw1c status register */
        if (FIELD_EX32(val32, ERROR_STATUS, CMDBUSY)) {
            status = FIELD_DP32(status, ERROR_STATUS, CMDBUSY, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, OVERFLOW)) {
            status = FIELD_DP32(status, ERROR_STATUS, OVERFLOW, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, UNDERFLOW)) {
            status = FIELD_DP32(status, ERROR_STATUS, UNDERFLOW, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, CMDINVAL)) {
            status = FIELD_DP32(status, ERROR_STATUS, CMDINVAL, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, CSIDINVAL)) {
            status = FIELD_DP32(status, ERROR_STATUS, CSIDINVAL, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, ACCESSINVAL)) {
            status = FIELD_DP32(status, ERROR_STATUS, ACCESSINVAL, 0);
        }
        s->regs[addr] = status;
        break;
    case IBEX_SPI_HOST_EVENT_ENABLE:
        /* Controls which classes of SPI events raise an interrupt. */
        s->regs[addr] = val32;

        if (val32 & R_EVENT_ENABLE_RXWM_MASK) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: RXWM is not supported\n", __func__);
        }
        if (val32 & R_EVENT_ENABLE_TXWM_MASK) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: TXWM is not supported\n", __func__);
        }

        if (val32 & R_EVENT_ENABLE_IDLE_MASK) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: IDLE is not supported\n", __func__);
        }
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
                      addr << 2);
    }
}

static const MemoryRegionOps ibex_spi_ops = {
    .read = ibex_spi_host_read,
    .write = ibex_spi_host_write,
    /* Ibex default LE */
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static Property ibex_spi_properties[] = {
    DEFINE_PROP_UINT32("num_cs", IbexSPIHostState, num_cs, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_ibex = {
    .name = TYPE_IBEX_SPI_HOST,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IbexSPIHostState, IBEX_SPI_HOST_MAX_REGS),
        VMSTATE_VARRAY_UINT32(config_opts, IbexSPIHostState,
                              num_cs, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_FIFO8(rx_fifo, IbexSPIHostState),
        VMSTATE_FIFO8(tx_fifo, IbexSPIHostState),
        VMSTATE_TIMER_PTR(fifo_trigger_handle, IbexSPIHostState),
        VMSTATE_BOOL(init_status, IbexSPIHostState),
        VMSTATE_END_OF_LIST()
    }
};

static void fifo_trigger_update(void *opaque)
{
    IbexSPIHostState *s = opaque;
    ibex_spi_host_transfer(s);
}

static void ibex_spi_host_realize(DeviceState *dev, Error **errp)
{
    IbexSPIHostState *s = IBEX_SPI_HOST(dev);
    int i;

    s->ssi = ssi_create_bus(dev, "ssi");
    s->cs_lines = g_new0(qemu_irq, s->num_cs);

    for (i = 0; i < s->num_cs; ++i) {
        sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cs_lines[i]);
    }

    /* Setup CONFIGOPTS Multi-register */
    s->config_opts = g_new0(uint32_t, s->num_cs);

    /* Setup FIFO Interrupt Timer */
    s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          fifo_trigger_update, s);

    /* FIFO sizes as per OT Spec */
    fifo8_create(&s->tx_fifo, IBEX_SPI_HOST_TXFIFO_LEN);
    fifo8_create(&s->rx_fifo, IBEX_SPI_HOST_RXFIFO_LEN);
}

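/*
 * Instance init: expose the error and event interrupt lines (in that
 * order) as sysbus IRQs, plus a 4 KiB MMIO region for the register block.
 */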
static void ibex_spi_host_init(Object *obj)
{
    IbexSPIHostState *s = IBEX_SPI_HOST(obj);

    sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->host_err);
    sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->event);

    memory_region_init_io(&s->mmio, obj, &ibex_spi_ops, s,
                          TYPE_IBEX_SPI_HOST, 0x1000);
    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
}

static void ibex_spi_host_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->realize = ibex_spi_host_realize;
    dc->reset = ibex_spi_host_reset;
    dc->vmsd = &vmstate_ibex;
    device_class_set_props(dc, ibex_spi_properties);
}

static const TypeInfo ibex_spi_host_info = {
    .name = TYPE_IBEX_SPI_HOST,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IbexSPIHostState),
    .instance_init = ibex_spi_host_init,
    .class_init = ibex_spi_host_class_init,
};

static void ibex_spi_host_register_types(void)
{
    type_register_static(&ibex_spi_host_info);
}

type_init(ibex_spi_host_register_types)