/*
 * QEMU model of the Ibex SPI Controller
 * SPEC Reference: https://docs.opentitan.org/hw/ip/spi_host/doc/
 *
 * Copyright (C) 2022 Western Digital
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/ssi/ibex_spi_host.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/vmstate.h"
#include "trace.h"

REG32(INTR_STATE, 0x00)
    FIELD(INTR_STATE, ERROR, 0, 1)
    FIELD(INTR_STATE, SPI_EVENT, 1, 1)
REG32(INTR_ENABLE, 0x04)
    FIELD(INTR_ENABLE, ERROR, 0, 1)
    FIELD(INTR_ENABLE, SPI_EVENT, 1, 1)
REG32(INTR_TEST, 0x08)
    FIELD(INTR_TEST, ERROR, 0, 1)
    FIELD(INTR_TEST, SPI_EVENT, 1, 1)
REG32(ALERT_TEST, 0x0c)
    FIELD(ALERT_TEST, FATAL_FAULT, 0, 1)
REG32(CONTROL, 0x10)
    FIELD(CONTROL, RX_WATERMARK, 0, 8)
    FIELD(CONTROL, TX_WATERMARK, 1, 8)
    FIELD(CONTROL, OUTPUT_EN, 29, 1)
    FIELD(CONTROL, SW_RST, 30, 1)
    FIELD(CONTROL, SPIEN, 31, 1)
REG32(STATUS, 0x14)
    FIELD(STATUS, TXQD, 0, 8)
    FIELD(STATUS, RXQD, 18, 8)
    FIELD(STATUS, CMDQD, 16, 3)
    FIELD(STATUS, RXWM, 20, 1)
    FIELD(STATUS, BYTEORDER, 22, 1)
    FIELD(STATUS, RXSTALL, 23, 1)
    FIELD(STATUS, RXEMPTY, 24, 1)
    FIELD(STATUS, RXFULL, 25, 1)
    FIELD(STATUS, TXWM, 26, 1)
    FIELD(STATUS, TXSTALL, 27, 1)
    FIELD(STATUS, TXEMPTY, 28, 1)
    FIELD(STATUS, TXFULL, 29, 1)
    FIELD(STATUS, ACTIVE, 30, 1)
    FIELD(STATUS, READY, 31, 1)
REG32(CONFIGOPTS, 0x18)
    FIELD(CONFIGOPTS, CLKDIV_0, 0, 16)
    FIELD(CONFIGOPTS, CSNIDLE_0, 16, 4)
    FIELD(CONFIGOPTS, CSNTRAIL_0, 20, 4)
    FIELD(CONFIGOPTS, CSNLEAD_0, 24, 4)
    FIELD(CONFIGOPTS, FULLCYC_0, 29, 1)
    FIELD(CONFIGOPTS, CPHA_0, 30, 1)
    FIELD(CONFIGOPTS, CPOL_0, 31, 1)
REG32(CSID, 0x1c)
    FIELD(CSID, CSID, 0, 32)
REG32(COMMAND, 0x20)
    FIELD(COMMAND, LEN, 0, 8)
    FIELD(COMMAND, CSAAT, 9, 1)
    FIELD(COMMAND, SPEED, 10, 2)
    FIELD(COMMAND, DIRECTION, 12, 2)
REG32(ERROR_ENABLE, 0x2c)
    FIELD(ERROR_ENABLE, CMDBUSY, 0, 1)
    FIELD(ERROR_ENABLE, OVERFLOW, 1, 1)
    FIELD(ERROR_ENABLE, UNDERFLOW, 2, 1)
    FIELD(ERROR_ENABLE, CMDINVAL, 3, 1)
    FIELD(ERROR_ENABLE, CSIDINVAL, 4, 1)
REG32(ERROR_STATUS, 0x30)
    FIELD(ERROR_STATUS, CMDBUSY, 0, 1)
    FIELD(ERROR_STATUS, OVERFLOW, 1, 1)
    FIELD(ERROR_STATUS, UNDERFLOW, 2, 1)
    FIELD(ERROR_STATUS, CMDINVAL, 3, 1)
    FIELD(ERROR_STATUS, CSIDINVAL, 4, 1)
    FIELD(ERROR_STATUS, ACCESSINVAL, 5, 1)
REG32(EVENT_ENABLE, 0x34)
    FIELD(EVENT_ENABLE, RXFULL, 0, 1)
    FIELD(EVENT_ENABLE, TXEMPTY, 1, 1)
    FIELD(EVENT_ENABLE, RXWM, 2, 1)
    FIELD(EVENT_ENABLE, TXWM, 3, 1)
    FIELD(EVENT_ENABLE, READY, 4, 1)
    FIELD(EVENT_ENABLE, IDLE, 5, 1)

/* Round a byte count up to the number of 32-bit words it occupies */
static inline uint8_t div4_round_up(uint8_t dividend)
{
    return (dividend + 3) / 4;
}

static void ibex_spi_rxfifo_reset(IbexSPIHostState *s)
{
    uint32_t data = s->regs[IBEX_SPI_HOST_STATUS];
    /* Empty the RX FIFO and assert RXEMPTY */
    fifo8_reset(&s->rx_fifo);
    data = FIELD_DP32(data, STATUS, RXFULL, 0);
    data = FIELD_DP32(data, STATUS, RXEMPTY, 1);
    s->regs[IBEX_SPI_HOST_STATUS] = data;
}

static void ibex_spi_txfifo_reset(IbexSPIHostState *s)
{
    uint32_t data = s->regs[IBEX_SPI_HOST_STATUS];
    /* Empty the TX FIFO and assert TXEMPTY */
    fifo8_reset(&s->tx_fifo);
    data = FIELD_DP32(data, STATUS, TXFULL, 0);
    data = FIELD_DP32(data, STATUS, TXEMPTY, 1);
    s->regs[IBEX_SPI_HOST_STATUS] = data;
}
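
/*
 * Restore every register to its reset value and drain both FIFOs.
 * Also invoked from the guest-visible CONTROL.SW_RST path below.
 */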
static void ibex_spi_host_reset(DeviceState *dev)
{
    IbexSPIHostState *s = IBEX_SPI_HOST(dev);
    trace_ibex_spi_host_reset("Resetting Ibex SPI");

    /* SPI Host Register Reset */
    s->regs[IBEX_SPI_HOST_INTR_STATE] = 0x00;
    s->regs[IBEX_SPI_HOST_INTR_ENABLE] = 0x00;
    s->regs[IBEX_SPI_HOST_INTR_TEST] = 0x00;
    s->regs[IBEX_SPI_HOST_ALERT_TEST] = 0x00;
    s->regs[IBEX_SPI_HOST_CONTROL] = 0x7f;
    s->regs[IBEX_SPI_HOST_STATUS] = 0x00;
    s->regs[IBEX_SPI_HOST_CONFIGOPTS] = 0x00;
    s->regs[IBEX_SPI_HOST_CSID] = 0x00;
    s->regs[IBEX_SPI_HOST_COMMAND] = 0x00;
    /* RX/TX Modelled by FIFO */
    s->regs[IBEX_SPI_HOST_RXDATA] = 0x00;
    s->regs[IBEX_SPI_HOST_TXDATA] = 0x00;

    s->regs[IBEX_SPI_HOST_ERROR_ENABLE] = 0x1F;
    s->regs[IBEX_SPI_HOST_ERROR_STATUS] = 0x00;
    s->regs[IBEX_SPI_HOST_EVENT_ENABLE] = 0x00;

    ibex_spi_rxfifo_reset(s);
    ibex_spi_txfifo_reset(s);

    s->init_status = true;
    return;
}

/*
 * Check if we need to trigger an interrupt.
 * The two interrupt lines (host_err and event) can
 * be enabled separately through 'IBEX_SPI_HOST_INTR_ENABLE'.
 *
 * Which individual error and event conditions raise an interrupt is
 * selected by `IBEX_SPI_HOST_ERROR_ENABLE` and `IBEX_SPI_HOST_EVENT_ENABLE`.
 */
static void ibex_spi_host_irq(IbexSPIHostState *s)
{
    uint32_t intr_test_reg = s->regs[IBEX_SPI_HOST_INTR_TEST];
    uint32_t intr_en_reg = s->regs[IBEX_SPI_HOST_INTR_ENABLE];
    uint32_t intr_state_reg = s->regs[IBEX_SPI_HOST_INTR_STATE];

    uint32_t err_en_reg = s->regs[IBEX_SPI_HOST_ERROR_ENABLE];
    uint32_t event_en_reg = s->regs[IBEX_SPI_HOST_EVENT_ENABLE];
    uint32_t err_status_reg = s->regs[IBEX_SPI_HOST_ERROR_STATUS];
    uint32_t status_reg = s->regs[IBEX_SPI_HOST_STATUS];

    bool error_en = FIELD_EX32(intr_en_reg, INTR_ENABLE, ERROR);
    bool event_en = FIELD_EX32(intr_en_reg, INTR_ENABLE, SPI_EVENT);
    bool err_pending = FIELD_EX32(intr_state_reg, INTR_STATE, ERROR);
    bool status_pending = FIELD_EX32(intr_state_reg, INTR_STATE, SPI_EVENT);

    int err_irq = 0, event_irq = 0;

    /* Error IRQ enabled and Error IRQ Cleared */
    if (error_en && !err_pending) {
        /* Event enabled, Interrupt Test Error */
        if (FIELD_EX32(intr_test_reg, INTR_TEST, ERROR)) {
            err_irq = 1;
        } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE, CMDBUSY) &&
                   FIELD_EX32(err_status_reg, ERROR_STATUS, CMDBUSY)) {
            /* Wrote to COMMAND when not READY */
            err_irq = 1;
        } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE, CMDINVAL) &&
                   FIELD_EX32(err_status_reg, ERROR_STATUS, CMDINVAL)) {
            /* Invalid command segment */
            err_irq = 1;
        } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE, CSIDINVAL) &&
                   FIELD_EX32(err_status_reg, ERROR_STATUS, CSIDINVAL)) {
            /* Invalid value for CSID */
            err_irq = 1;
        }
        if (err_irq) {
            s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_ERROR_MASK;
        }
        qemu_set_irq(s->host_err, err_irq);
    }

    /* Event IRQ Enabled and Event IRQ Cleared */
    if (event_en && !status_pending) {
        if (FIELD_EX32(intr_test_reg, INTR_TEST, SPI_EVENT)) {
            /* Event enabled, Interrupt Test Event */
            event_irq = 1;
        } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE, READY) &&
                   FIELD_EX32(status_reg, STATUS, READY)) {
            /* SPI Host ready for next command */
            event_irq = 1;
        } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE, TXEMPTY) &&
                   FIELD_EX32(status_reg, STATUS, TXEMPTY)) {
            /* SPI TXEMPTY, TXFIFO drained */
            event_irq = 1;
        } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE, RXFULL) &&
                   FIELD_EX32(status_reg, STATUS, RXFULL)) {
            /* SPI RXFULL, RXFIFO full */
            event_irq = 1;
        }
        if (event_irq) {
            s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_SPI_EVENT_MASK;
        }
        qemu_set_irq(s->event, event_irq);
    }
}
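
/*
 * Data phase of a command segment: bytes are popped from the TX FIFO,
 * shifted out over the SSI bus, and the returned bytes are pushed into the
 * RX FIFO.  The loop stops early and flags TXSTALL/RXSTALL if the TX FIFO
 * runs dry or the RX FIFO fills up before the segment length is reached.
 */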
static void ibex_spi_host_transfer(IbexSPIHostState *s)
{
    uint32_t rx, tx, data;
    /* Get num of one byte transfers */
    uint8_t segment_len = FIELD_EX32(s->regs[IBEX_SPI_HOST_COMMAND],
                                     COMMAND, LEN);

    while (segment_len > 0) {
        if (fifo8_is_empty(&s->tx_fifo)) {
            /* Assert Stall */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXSTALL_MASK;
            break;
        } else if (fifo8_is_full(&s->rx_fifo)) {
            /* Assert Stall */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXSTALL_MASK;
            break;
        } else {
            tx = fifo8_pop(&s->tx_fifo);
        }

        rx = ssi_transfer(s->ssi, tx);

        trace_ibex_spi_host_transfer(tx, rx);

        if (!fifo8_is_full(&s->rx_fifo)) {
            fifo8_push(&s->rx_fifo, rx);
        } else {
            /* Assert RXFULL */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXFULL_MASK;
        }
        --segment_len;
    }

    data = s->regs[IBEX_SPI_HOST_STATUS];
    /* Assert Ready */
    data = FIELD_DP32(data, STATUS, READY, 1);
    /* Set RXQD */
    data = FIELD_DP32(data, STATUS, RXQD, div4_round_up(segment_len));
    /* Set TXQD */
    data = FIELD_DP32(data, STATUS, TXQD, fifo8_num_used(&s->tx_fifo) / 4);
    /* Clear TXFULL */
    data = FIELD_DP32(data, STATUS, TXFULL, 0);
    /* Reset RXEMPTY */
    data = FIELD_DP32(data, STATUS, RXEMPTY, 0);
    /* Update register status */
    s->regs[IBEX_SPI_HOST_STATUS] = data;
    /* Drop remaining bytes that exceed segment_len */
    ibex_spi_txfifo_reset(s);

    ibex_spi_host_irq(s);
}

static uint64_t ibex_spi_host_read(void *opaque, hwaddr addr,
                                   unsigned int size)
{
    IbexSPIHostState *s = opaque;
    uint32_t rc = 0;
    uint8_t rx_byte = 0;

    trace_ibex_spi_host_read(addr, size);

    /* Match reg index */
    addr = addr >> 2;
    switch (addr) {
    /* Skipping any W/O registers */
    case IBEX_SPI_HOST_INTR_STATE ... IBEX_SPI_HOST_INTR_ENABLE:
    case IBEX_SPI_HOST_CONTROL ... IBEX_SPI_HOST_STATUS:
        rc = s->regs[addr];
        break;
    case IBEX_SPI_HOST_CSID:
        rc = s->regs[addr];
        break;
    case IBEX_SPI_HOST_CONFIGOPTS:
        rc = s->config_opts[s->regs[IBEX_SPI_HOST_CSID]];
        break;
    case IBEX_SPI_HOST_TXDATA:
        rc = s->regs[addr];
        break;
    case IBEX_SPI_HOST_RXDATA:
        /* Clear RXFULL */
        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXFULL_MASK;

        for (int i = 0; i < 4; ++i) {
            if (fifo8_is_empty(&s->rx_fifo)) {
                /* Assert RXEMPTY, no IRQ */
                s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXEMPTY_MASK;
                s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
                                               R_ERROR_STATUS_UNDERFLOW_MASK;
                return rc;
            }
            rx_byte = fifo8_pop(&s->rx_fifo);
            rc |= rx_byte << (i * 8);
        }
        break;
    case IBEX_SPI_HOST_ERROR_ENABLE ... IBEX_SPI_HOST_EVENT_ENABLE:
        rc = s->regs[addr];
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
                      addr << 2);
    }
    return rc;
}
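
/*
 * Guest MMIO writes.  Note that a COMMAND write does not shift data out
 * immediately: it arms fifo_trigger_handle, and the actual transfer is
 * performed from the timer callback once the guest has queued TXDATA.
 */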
static void ibex_spi_host_write(void *opaque, hwaddr addr,
                                uint64_t val64, unsigned int size)
{
    IbexSPIHostState *s = opaque;
    uint32_t val32 = val64;
    uint32_t shift_mask = 0xff, status = 0, data = 0;
    uint8_t txqd_len;

    trace_ibex_spi_host_write(addr, size, val64);

    /* Match reg index */
    addr = addr >> 2;

    switch (addr) {
    /* Skipping any R/O registers */
    case IBEX_SPI_HOST_INTR_STATE:
        /* rw1c status register: writing 1 clears the corresponding bit */
        data = s->regs[addr];
        if (FIELD_EX32(val32, INTR_STATE, ERROR)) {
            data = FIELD_DP32(data, INTR_STATE, ERROR, 0);
        }
        if (FIELD_EX32(val32, INTR_STATE, SPI_EVENT)) {
            data = FIELD_DP32(data, INTR_STATE, SPI_EVENT, 0);
        }
        s->regs[addr] = data;
        break;
    case IBEX_SPI_HOST_INTR_ENABLE:
        s->regs[addr] = val32;
        break;
    case IBEX_SPI_HOST_INTR_TEST:
        s->regs[addr] = val32;
        ibex_spi_host_irq(s);
        break;
    case IBEX_SPI_HOST_ALERT_TEST:
        s->regs[addr] = val32;
        qemu_log_mask(LOG_UNIMP,
                      "%s: SPI_ALERT_TEST is not supported\n", __func__);
        break;
    case IBEX_SPI_HOST_CONTROL:
        s->regs[addr] = val32;

        if (val32 & R_CONTROL_SW_RST_MASK) {
            ibex_spi_host_reset((DeviceState *)s);
            /* Clear active if any */
            s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_ACTIVE_MASK;
        }

        if (val32 & R_CONTROL_OUTPUT_EN_MASK) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: CONTROL_OUTPUT_EN is not supported\n", __func__);
        }
        break;
    case IBEX_SPI_HOST_CONFIGOPTS:
        /* Update the CONFIGOPTS multi-register entry selected by CSID */
        s->config_opts[s->regs[IBEX_SPI_HOST_CSID]] = val32;
        qemu_log_mask(LOG_UNIMP,
                      "%s: CONFIGOPTS Hardware settings not supported\n",
                      __func__);
        break;
    case IBEX_SPI_HOST_CSID:
        if (val32 >= s->num_cs) {
            /* CSID exceeds max num_cs */
            s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
                                                R_ERROR_STATUS_CSIDINVAL_MASK;
            ibex_spi_host_irq(s);
            return;
        }
        s->regs[addr] = val32;
        break;
    case IBEX_SPI_HOST_COMMAND:
        s->regs[addr] = val32;

        /* STALL, IP not enabled */
        if (!(FIELD_EX32(s->regs[IBEX_SPI_HOST_CONTROL],
                         CONTROL, SPIEN))) {
            return;
        }

        /* SPI not ready, IRQ Error */
        if (!(FIELD_EX32(s->regs[IBEX_SPI_HOST_STATUS],
                         STATUS, READY))) {
            s->regs[IBEX_SPI_HOST_ERROR_STATUS] |= R_ERROR_STATUS_CMDBUSY_MASK;
            ibex_spi_host_irq(s);
            return;
        }

        /* Assert Not Ready */
        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_READY_MASK;

        if (FIELD_EX32(val32, COMMAND, DIRECTION) != BIDIRECTIONAL_TRANSFER) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Rx Only/Tx Only are not supported\n", __func__);
        }

        if (val32 & R_COMMAND_CSAAT_MASK) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: CSAAT is not supported\n", __func__);
        }
        if (val32 & R_COMMAND_SPEED_MASK) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: SPEED is not supported\n", __func__);
        }

        /* Set Transfer Callback */
        timer_mod(s->fifo_trigger_handle,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  (TX_INTERRUPT_TRIGGER_DELAY_NS));

        break;
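    /*
     * TXDATA takes a full 32-bit word per access; the four bytes are pushed
     * into the byte-wide TX FIFO LSB first (in the little-endian case), and
     * STATUS.TXQD, counted in words, is incremented once per write.
     */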
    case IBEX_SPI_HOST_TXDATA:
        /*
         * This is a hardware `feature` where
         * the first word written to TXDATA after init is omitted entirely
         */
        if (s->init_status) {
            s->init_status = false;
            return;
        }

        for (int i = 0; i < 4; ++i) {
            /* Attempting to write when TXFULL */
            if (fifo8_is_full(&s->tx_fifo)) {
                /* Assert TXFULL and flag an overflow error */
                s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXFULL_MASK;
                s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
                                                 R_ERROR_STATUS_OVERFLOW_MASK;
                ibex_spi_host_irq(s);
                return;
            }
            /* Byte ordering is set by the IP */
            status = s->regs[IBEX_SPI_HOST_STATUS];
            if (FIELD_EX32(status, STATUS, BYTEORDER) == 0) {
                /* LE: LSB transmitted first (default for ibex processor) */
                shift_mask = 0xff << (i * 8);
            } else {
                /* BE: MSB transmitted first */
                qemu_log_mask(LOG_UNIMP,
                              "%s: Big endian is not supported\n", __func__);
            }

            fifo8_push(&s->tx_fifo, (val32 & shift_mask) >> (i * 8));
        }
        status = s->regs[IBEX_SPI_HOST_STATUS];
        /* Reset TXEMPTY */
        status = FIELD_DP32(status, STATUS, TXEMPTY, 0);
        /* Update TXQD */
        txqd_len = FIELD_EX32(status, STATUS, TXQD);
        /* Partial bytes (size < 4) are padded, in words. */
        txqd_len += 1;
        status = FIELD_DP32(status, STATUS, TXQD, txqd_len);
        /* Assert Ready */
        status = FIELD_DP32(status, STATUS, READY, 1);
        /* Update register status */
        s->regs[IBEX_SPI_HOST_STATUS] = status;
        break;
    case IBEX_SPI_HOST_ERROR_ENABLE:
        s->regs[addr] = val32;

        if (val32 & R_ERROR_ENABLE_CMDINVAL_MASK) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Segment Length is not supported\n", __func__);
        }
        break;
    case IBEX_SPI_HOST_ERROR_STATUS:
        /*
         * Indicates any errors that have occurred.
         * When an error occurs, the corresponding bit must be cleared
         * here before issuing any further commands.
         */
        status = s->regs[addr];
        /* rw1c status register */
        if (FIELD_EX32(val32, ERROR_STATUS, CMDBUSY)) {
            status = FIELD_DP32(status, ERROR_STATUS, CMDBUSY, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, OVERFLOW)) {
            status = FIELD_DP32(status, ERROR_STATUS, OVERFLOW, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, UNDERFLOW)) {
            status = FIELD_DP32(status, ERROR_STATUS, UNDERFLOW, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, CMDINVAL)) {
            status = FIELD_DP32(status, ERROR_STATUS, CMDINVAL, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, CSIDINVAL)) {
            status = FIELD_DP32(status, ERROR_STATUS, CSIDINVAL, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, ACCESSINVAL)) {
            status = FIELD_DP32(status, ERROR_STATUS, ACCESSINVAL, 0);
        }
        s->regs[addr] = status;
        break;
    case IBEX_SPI_HOST_EVENT_ENABLE:
        /* Controls which classes of SPI events raise an interrupt. */
        s->regs[addr] = val32;

        if (val32 & R_EVENT_ENABLE_RXWM_MASK) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: RXWM is not supported\n", __func__);
        }
        if (val32 & R_EVENT_ENABLE_TXWM_MASK) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: TXWM is not supported\n", __func__);
        }

        if (val32 & R_EVENT_ENABLE_IDLE_MASK) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: IDLE is not supported\n", __func__);
        }
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
                      addr << 2);
    }
}

static const MemoryRegionOps ibex_spi_ops = {
    .read = ibex_spi_host_read,
    .write = ibex_spi_host_write,
    /* Ibex default LE */
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static Property ibex_spi_properties[] = {
    DEFINE_PROP_UINT32("num_cs", IbexSPIHostState, num_cs, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_ibex = {
    .name = TYPE_IBEX_SPI_HOST,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IbexSPIHostState, IBEX_SPI_HOST_MAX_REGS),
        VMSTATE_VARRAY_UINT32(config_opts, IbexSPIHostState,
                              num_cs, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_FIFO8(rx_fifo, IbexSPIHostState),
        VMSTATE_FIFO8(tx_fifo, IbexSPIHostState),
        VMSTATE_TIMER_PTR(fifo_trigger_handle, IbexSPIHostState),
        VMSTATE_BOOL(init_status, IbexSPIHostState),
        VMSTATE_END_OF_LIST()
    }
};

/* Timer callback armed by a COMMAND write: run the queued SPI transfer */
static void fifo_trigger_update(void *opaque)
{
    IbexSPIHostState *s = opaque;
    ibex_spi_host_transfer(s);
}
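
/*
 * Realize: create the SSI bus, one chip-select line per "num_cs", the
 * CONFIGOPTS multi-register backing store, the transfer timer and the
 * TX/RX FIFOs.
 */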
static void ibex_spi_host_realize(DeviceState *dev, Error **errp)
{
    IbexSPIHostState *s = IBEX_SPI_HOST(dev);
    int i;

    s->ssi = ssi_create_bus(dev, "ssi");
    s->cs_lines = g_new0(qemu_irq, s->num_cs);

    for (i = 0; i < s->num_cs; ++i) {
        sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cs_lines[i]);
    }

    /* Setup CONFIGOPTS Multi-register */
    s->config_opts = g_new0(uint32_t, s->num_cs);

    /* Setup FIFO Interrupt Timer */
    s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          fifo_trigger_update, s);

    /* FIFO sizes as per OT Spec */
    fifo8_create(&s->tx_fifo, IBEX_SPI_HOST_TXFIFO_LEN);
    fifo8_create(&s->rx_fifo, IBEX_SPI_HOST_RXFIFO_LEN);
}

static void ibex_spi_host_init(Object *obj)
{
    IbexSPIHostState *s = IBEX_SPI_HOST(obj);

    sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->host_err);
    sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->event);

    memory_region_init_io(&s->mmio, obj, &ibex_spi_ops, s,
                          TYPE_IBEX_SPI_HOST, 0x1000);
    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
}

static void ibex_spi_host_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = ibex_spi_host_realize;
    dc->reset = ibex_spi_host_reset;
    dc->vmsd = &vmstate_ibex;
    device_class_set_props(dc, ibex_spi_properties);
}

static const TypeInfo ibex_spi_host_info = {
    .name = TYPE_IBEX_SPI_HOST,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IbexSPIHostState),
    .instance_init = ibex_spi_host_init,
    .class_init = ibex_spi_host_class_init,
};

static void ibex_spi_host_register_types(void)
{
    type_register_static(&ibex_spi_host_info);
}

type_init(ibex_spi_host_register_types)