xref: /openbmc/qemu/hw/ssi/ibex_spi_host.c (revision 5c5e0445)
1 /*
2  * QEMU model of the Ibex SPI Controller
3  * SPEC Reference: https://docs.opentitan.org/hw/ip/spi_host/doc/
4  *
5  * Copyright (C) 2022 Western Digital
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "qemu/log.h"
28 #include "qemu/module.h"
29 #include "hw/ssi/ibex_spi_host.h"
30 #include "hw/irq.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/qdev-properties-system.h"
33 #include "migration/vmstate.h"
34 #include "trace.h"
35 
36 REG32(INTR_STATE, 0x00)
37     FIELD(INTR_STATE, ERROR, 0, 1)
38     FIELD(INTR_STATE, SPI_EVENT, 1, 1)
39 REG32(INTR_ENABLE, 0x04)
40     FIELD(INTR_ENABLE, ERROR, 0, 1)
41     FIELD(INTR_ENABLE, SPI_EVENT, 1, 1)
42 REG32(INTR_TEST, 0x08)
43     FIELD(INTR_TEST, ERROR, 0, 1)
44     FIELD(INTR_TEST, SPI_EVENT, 1, 1)
45 REG32(ALERT_TEST, 0x0c)
46     FIELD(ALERT_TEST, FETAL_TEST, 0, 1)
47 REG32(CONTROL, 0x10)
48     FIELD(CONTROL, RX_WATERMARK, 0, 8)
49     FIELD(CONTROL, TX_WATERMARK, 1, 8)
50     FIELD(CONTROL, OUTPUT_EN, 29, 1)
51     FIELD(CONTROL, SW_RST, 30, 1)
52     FIELD(CONTROL, SPIEN, 31, 1)
53 REG32(STATUS, 0x14)
54     FIELD(STATUS, TXQD, 0, 8)
55     FIELD(STATUS, RXQD, 18, 8)
56     FIELD(STATUS, CMDQD, 16, 3)
57     FIELD(STATUS, RXWM, 20, 1)
58     FIELD(STATUS, BYTEORDER, 22, 1)
59     FIELD(STATUS, RXSTALL, 23, 1)
60     FIELD(STATUS, RXEMPTY, 24, 1)
61     FIELD(STATUS, RXFULL, 25, 1)
62     FIELD(STATUS, TXWM, 26, 1)
63     FIELD(STATUS, TXSTALL, 27, 1)
64     FIELD(STATUS, TXEMPTY, 28, 1)
65     FIELD(STATUS, TXFULL, 29, 1)
66     FIELD(STATUS, ACTIVE, 30, 1)
67     FIELD(STATUS, READY, 31, 1)
68 REG32(CONFIGOPTS, 0x18)
69     FIELD(CONFIGOPTS, CLKDIV_0, 0, 16)
70     FIELD(CONFIGOPTS, CSNIDLE_0, 16, 4)
71     FIELD(CONFIGOPTS, CSNTRAIL_0, 20, 4)
72     FIELD(CONFIGOPTS, CSNLEAD_0, 24, 4)
73     FIELD(CONFIGOPTS, FULLCYC_0, 29, 1)
74     FIELD(CONFIGOPTS, CPHA_0, 30, 1)
75     FIELD(CONFIGOPTS, CPOL_0, 31, 1)
76 REG32(CSID, 0x1c)
77     FIELD(CSID, CSID, 0, 32)
78 REG32(COMMAND, 0x20)
79     FIELD(COMMAND, LEN, 0, 8)
80     FIELD(COMMAND, CSAAT, 9, 1)
81     FIELD(COMMAND, SPEED, 10, 2)
82     FIELD(COMMAND, DIRECTION, 12, 2)
83 REG32(ERROR_ENABLE, 0x2c)
84     FIELD(ERROR_ENABLE, CMDBUSY, 0, 1)
85     FIELD(ERROR_ENABLE, OVERFLOW, 1, 1)
86     FIELD(ERROR_ENABLE, UNDERFLOW, 2, 1)
87     FIELD(ERROR_ENABLE, CMDINVAL, 3, 1)
88     FIELD(ERROR_ENABLE, CSIDINVAL, 4, 1)
89 REG32(ERROR_STATUS, 0x30)
90     FIELD(ERROR_STATUS, CMDBUSY, 0, 1)
91     FIELD(ERROR_STATUS, OVERFLOW, 1, 1)
92     FIELD(ERROR_STATUS, UNDERFLOW, 2, 1)
93     FIELD(ERROR_STATUS, CMDINVAL, 3, 1)
94     FIELD(ERROR_STATUS, CSIDINVAL, 4, 1)
95     FIELD(ERROR_STATUS, ACCESSINVAL, 5, 1)
96 REG32(EVENT_ENABLE, 0x30)
97     FIELD(EVENT_ENABLE, RXFULL, 0, 1)
98     FIELD(EVENT_ENABLE, TXEMPTY, 1, 1)
99     FIELD(EVENT_ENABLE, RXWM, 2, 1)
100     FIELD(EVENT_ENABLE, TXWM, 3, 1)
101     FIELD(EVENT_ENABLE, READY, 4, 1)
102     FIELD(EVENT_ENABLE, IDLE, 5, 1)
103 
/*
 * Convert a byte count into the number of 32-bit words needed to hold it,
 * rounding any partial word up.
 */
static inline uint8_t div4_round_up(uint8_t dividend)
{
    uint8_t words = dividend >> 2;

    if (dividend & 0x3) {
        ++words;
    }
    return words;
}
108 
109 static void ibex_spi_rxfifo_reset(IbexSPIHostState *s)
110 {
111     /* Empty the RX FIFO and assert RXEMPTY */
112     fifo8_reset(&s->rx_fifo);
113     s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXFULL_MASK;
114     s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXEMPTY_MASK;
115 }
116 
117 static void ibex_spi_txfifo_reset(IbexSPIHostState *s)
118 {
119     /* Empty the TX FIFO and assert TXEMPTY */
120     fifo8_reset(&s->tx_fifo);
121     s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXFULL_MASK;
122     s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXEMPTY_MASK;
123 }
124 
125 static void ibex_spi_host_reset(DeviceState *dev)
126 {
127     IbexSPIHostState *s = IBEX_SPI_HOST(dev);
128     trace_ibex_spi_host_reset("Resetting Ibex SPI");
129 
130     /* SPI Host Register Reset */
131     s->regs[IBEX_SPI_HOST_INTR_STATE]   = 0x00;
132     s->regs[IBEX_SPI_HOST_INTR_ENABLE]  = 0x00;
133     s->regs[IBEX_SPI_HOST_INTR_TEST]    = 0x00;
134     s->regs[IBEX_SPI_HOST_ALERT_TEST]   = 0x00;
135     s->regs[IBEX_SPI_HOST_CONTROL]      = 0x7f;
136     s->regs[IBEX_SPI_HOST_STATUS]       = 0x00;
137     s->regs[IBEX_SPI_HOST_CONFIGOPTS]   = 0x00;
138     s->regs[IBEX_SPI_HOST_CSID]         = 0x00;
139     s->regs[IBEX_SPI_HOST_COMMAND]      = 0x00;
140     /* RX/TX Modelled by FIFO */
141     s->regs[IBEX_SPI_HOST_RXDATA]       = 0x00;
142     s->regs[IBEX_SPI_HOST_TXDATA]       = 0x00;
143 
144     s->regs[IBEX_SPI_HOST_ERROR_ENABLE] = 0x1F;
145     s->regs[IBEX_SPI_HOST_ERROR_STATUS] = 0x00;
146     s->regs[IBEX_SPI_HOST_EVENT_ENABLE] = 0x00;
147 
148     ibex_spi_rxfifo_reset(s);
149     ibex_spi_txfifo_reset(s);
150 
151     s->init_status = true;
152     return;
153 }
154 
/*
 * Check if we need to trigger an interrupt.
 * The two interrupt lines (host_err and event) can
 * be enabled separately in 'IBEX_SPI_HOST_INTR_ENABLE'.
 *
 * Interrupts are triggered based on the ones
 * enabled in the `IBEX_SPI_HOST_EVENT_ENABLE` and `IBEX_SPI_HOST_ERROR_ENABLE`.
 *
 * NOTE(review): qemu_set_irq() is only reached while the corresponding
 * INTR_STATE bit is still clear.  Once a bit latches, this function never
 * touches that line again (including never lowering it) — confirm this is
 * the intended level behaviour given INTR_STATE is rw1c in the spec.
 */
static void ibex_spi_host_irq(IbexSPIHostState *s)
{
    /* Per-line enable bits from INTR_ENABLE */
    bool error_en = s->regs[IBEX_SPI_HOST_INTR_ENABLE]
                    & R_INTR_ENABLE_ERROR_MASK;
    bool event_en = s->regs[IBEX_SPI_HOST_INTR_ENABLE]
                    & R_INTR_ENABLE_SPI_EVENT_MASK;
    /* Already-latched interrupt state from INTR_STATE */
    bool err_pending = s->regs[IBEX_SPI_HOST_INTR_STATE]
                        & R_INTR_STATE_ERROR_MASK;
    bool status_pending = s->regs[IBEX_SPI_HOST_INTR_STATE]
                        & R_INTR_STATE_SPI_EVENT_MASK;
    int err_irq = 0, event_irq = 0;

    /* Error IRQ enabled and Error IRQ Cleared */
    if (error_en && !err_pending) {
        /* Event enabled, Interrupt Test Error */
        if (s->regs[IBEX_SPI_HOST_INTR_TEST] & R_INTR_TEST_ERROR_MASK) {
            err_irq = 1;
        } else if ((s->regs[IBEX_SPI_HOST_ERROR_ENABLE]
                    &  R_ERROR_ENABLE_CMDBUSY_MASK) &&
                    s->regs[IBEX_SPI_HOST_ERROR_STATUS]
                    & R_ERROR_STATUS_CMDBUSY_MASK) {
            /* Wrote to COMMAND when not READY */
            err_irq = 1;
        } else if ((s->regs[IBEX_SPI_HOST_ERROR_ENABLE]
                    &  R_ERROR_ENABLE_CMDINVAL_MASK) &&
                    s->regs[IBEX_SPI_HOST_ERROR_STATUS]
                    & R_ERROR_STATUS_CMDINVAL_MASK) {
            /* Invalid command segment */
            err_irq = 1;
        } else if ((s->regs[IBEX_SPI_HOST_ERROR_ENABLE]
                    & R_ERROR_ENABLE_CSIDINVAL_MASK) &&
                    s->regs[IBEX_SPI_HOST_ERROR_STATUS]
                    & R_ERROR_STATUS_CSIDINVAL_MASK) {
            /* Invalid value for CSID */
            err_irq = 1;
        }
        if (err_irq) {
            /* Latch the error into INTR_STATE before raising the line */
            s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_ERROR_MASK;
        }
        qemu_set_irq(s->host_err, err_irq);
    }

    /* Event IRQ Enabled and Event IRQ Cleared */
    if (event_en && !status_pending) {
        if (s->regs[IBEX_SPI_HOST_INTR_TEST] & R_INTR_TEST_SPI_EVENT_MASK) {
            /* Event enabled, Interrupt Test Event */
            event_irq = 1;
        } else if ((s->regs[IBEX_SPI_HOST_EVENT_ENABLE]
                    & R_EVENT_ENABLE_READY_MASK) &&
                    (s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_READY_MASK)) {
            /* SPI Host ready for next command */
            event_irq = 1;
        } else if ((s->regs[IBEX_SPI_HOST_EVENT_ENABLE]
                    & R_EVENT_ENABLE_TXEMPTY_MASK) &&
                    (s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_TXEMPTY_MASK)) {
            /* SPI TXEMPTY, TXFIFO drained */
            event_irq = 1;
        } else if ((s->regs[IBEX_SPI_HOST_EVENT_ENABLE]
                    & R_EVENT_ENABLE_RXFULL_MASK) &&
                    (s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_RXFULL_MASK)) {
            /* SPI RXFULL, RXFIFO  full */
            event_irq = 1;
        }
        if (event_irq) {
            /* Latch the event into INTR_STATE before raising the line */
            s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_SPI_EVENT_MASK;
        }
        qemu_set_irq(s->event, event_irq);
    }
}
232 
/*
 * Execute one command segment: shift bytes from the TX FIFO out on the SSI
 * bus and collect the responses into the RX FIFO, then update STATUS and
 * re-evaluate interrupts.  Runs from the fifo_trigger_handle timer armed by
 * a COMMAND write.
 */
static void ibex_spi_host_transfer(IbexSPIHostState *s)
{
    uint32_t rx, tx;
    /*
     * Get num of one byte transfers.
     * NOTE(review): the spec defines the segment length as LEN + 1 bytes;
     * LEN is used directly here — confirm against the COMMAND register
     * description before relying on exact lengths.
     */
    uint8_t segment_len = ((s->regs[IBEX_SPI_HOST_COMMAND] & R_COMMAND_LEN_MASK)
                          >> R_COMMAND_LEN_SHIFT);
    while (segment_len > 0) {
        if (fifo8_is_empty(&s->tx_fifo)) {
            /* Assert Stall */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXSTALL_MASK;
            break;
        } else if (fifo8_is_full(&s->rx_fifo)) {
            /* Assert Stall */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXSTALL_MASK;
            break;
        } else {
            tx = fifo8_pop(&s->tx_fifo);
        }

        /* Full-duplex: one byte out, one byte back in */
        rx = ssi_transfer(s->ssi, tx);

        trace_ibex_spi_host_transfer(tx, rx);

        if (!fifo8_is_full(&s->rx_fifo)) {
            fifo8_push(&s->rx_fifo, rx);
        } else {
            /* Assert RXFULL */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXFULL_MASK;
        }
        --segment_len;
    }

    /* Assert Ready */
    s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_READY_MASK;
    /*
     * Set RXQD.
     * NOTE(review): this uses the leftover (untransferred) segment_len in
     * words, not the RX FIFO occupancy; RXQD also sits at bit 18, while the
     * unshifted value below only lands in its low bits — verify intent.
     */
    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXQD_MASK;
    s->regs[IBEX_SPI_HOST_STATUS] |= (R_STATUS_RXQD_MASK
                                    & div4_round_up(segment_len));
    /* Set TXQD (in words, from what remains in the TX FIFO) */
    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXQD_MASK;
    s->regs[IBEX_SPI_HOST_STATUS] |= (fifo8_num_used(&s->tx_fifo) / 4)
                                    & R_STATUS_TXQD_MASK;
    /* Clear TXFULL */
    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXFULL_MASK;
    /* Assert TXEMPTY and drop remaining bytes that exceed segment_len */
    ibex_spi_txfifo_reset(s);
    /* Reset RXEMPTY */
    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXEMPTY_MASK;

    ibex_spi_host_irq(s);
}
284 
285 static uint64_t ibex_spi_host_read(void *opaque, hwaddr addr,
286                                      unsigned int size)
287 {
288     IbexSPIHostState *s = opaque;
289     uint32_t rc = 0;
290     uint8_t rx_byte = 0;
291 
292     trace_ibex_spi_host_read(addr, size);
293 
294     /* Match reg index */
295     addr = addr >> 2;
296     switch (addr) {
297     /* Skipping any W/O registers */
298     case IBEX_SPI_HOST_INTR_STATE...IBEX_SPI_HOST_INTR_ENABLE:
299     case IBEX_SPI_HOST_CONTROL...IBEX_SPI_HOST_STATUS:
300         rc = s->regs[addr];
301         break;
302     case IBEX_SPI_HOST_CSID:
303         rc = s->regs[addr];
304         break;
305     case IBEX_SPI_HOST_CONFIGOPTS:
306         rc = s->config_opts[s->regs[IBEX_SPI_HOST_CSID]];
307         break;
308     case IBEX_SPI_HOST_TXDATA:
309         rc = s->regs[addr];
310         break;
311     case IBEX_SPI_HOST_RXDATA:
312         /* Clear RXFULL */
313         s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXFULL_MASK;
314 
315         for (int i = 0; i < 4; ++i) {
316             if (fifo8_is_empty(&s->rx_fifo)) {
317                 /* Assert RXEMPTY, no IRQ */
318                 s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXEMPTY_MASK;
319                 s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
320                                                 R_ERROR_STATUS_UNDERFLOW_MASK;
321                 return rc;
322             }
323             rx_byte = fifo8_pop(&s->rx_fifo);
324             rc |= rx_byte << (i * 8);
325         }
326         break;
327     case IBEX_SPI_HOST_ERROR_ENABLE...IBEX_SPI_HOST_EVENT_ENABLE:
328         rc = s->regs[addr];
329         break;
330     default:
331         qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
332                       addr << 2);
333     }
334     return rc;
335 }
336 
337 
/*
 * MMIO write handler.
 *
 * Dispatches on the register index; writes to COMMAND kick off a transfer
 * via the fifo_trigger_handle timer, and writes to TXDATA push up to one
 * word into the TX FIFO.  R/O registers are skipped.
 */
static void ibex_spi_host_write(void *opaque, hwaddr addr,
                                uint64_t val64, unsigned int size)
{
    IbexSPIHostState *s = opaque;
    uint32_t val32 = val64;
    uint32_t shift_mask = 0xff;
    uint8_t txqd_len;

    trace_ibex_spi_host_write(addr, size, val64);

    /* Byte offset -> register index */
    addr = addr >> 2;

    switch (addr) {
    /*
     * Skipping any R/O registers.
     * NOTE(review): the spec marks INTR_STATE as rw1c (write-1-to-clear);
     * this direct assignment lets guests set pending bits — confirm.
     */
    case IBEX_SPI_HOST_INTR_STATE...IBEX_SPI_HOST_INTR_ENABLE:
        s->regs[addr] = val32;
        break;
    case IBEX_SPI_HOST_INTR_TEST:
        /* Writing the test register may immediately raise an interrupt */
        s->regs[addr] = val32;
        ibex_spi_host_irq(s);
        break;
    case IBEX_SPI_HOST_ALERT_TEST:
        s->regs[addr] = val32;
        qemu_log_mask(LOG_UNIMP,
                        "%s: SPI_ALERT_TEST is not supported\n", __func__);
        break;
    case IBEX_SPI_HOST_CONTROL:
        s->regs[addr] = val32;

        if (val32 & R_CONTROL_SW_RST_MASK)  {
            /* Software reset: full device reset, then drop ACTIVE */
            ibex_spi_host_reset((DeviceState *)s);
            /* Clear active if any */
            s->regs[IBEX_SPI_HOST_STATUS] &=  ~R_STATUS_ACTIVE_MASK;
        }

        if (val32 & R_CONTROL_OUTPUT_EN_MASK)  {
            qemu_log_mask(LOG_UNIMP,
                          "%s: CONTROL_OUTPUT_EN is not supported\n", __func__);
        }
        break;
    case IBEX_SPI_HOST_CONFIGOPTS:
        /* Update the respective config-opts register based on CSIDth index */
        s->config_opts[s->regs[IBEX_SPI_HOST_CSID]] = val32;
        qemu_log_mask(LOG_UNIMP,
                      "%s: CONFIGOPTS Hardware settings not supported\n",
                         __func__);
        break;
    case IBEX_SPI_HOST_CSID:
        if (val32 >= s->num_cs) {
            /* CSID exceeds max num_cs: latch the error and raise the IRQ */
            s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
                                                R_ERROR_STATUS_CSIDINVAL_MASK;
            ibex_spi_host_irq(s);
            return;
        }
        s->regs[addr] = val32;
        break;
    case IBEX_SPI_HOST_COMMAND:
        s->regs[addr] = val32;

        /* STALL, IP not enabled */
        if (!(s->regs[IBEX_SPI_HOST_CONTROL] & R_CONTROL_SPIEN_MASK)) {
            return;
        }

        /* SPI not ready, IRQ Error */
        if (!(s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_READY_MASK)) {
            s->regs[IBEX_SPI_HOST_ERROR_STATUS] |= R_ERROR_STATUS_CMDBUSY_MASK;
            ibex_spi_host_irq(s);
            return;
        }
        /* Assert Not Ready; READY is re-asserted when the transfer runs */
        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_READY_MASK;

        /* Only full-duplex segments are modelled */
        if (((val32 & R_COMMAND_DIRECTION_MASK) >> R_COMMAND_DIRECTION_SHIFT)
            != BIDIRECTIONAL_TRANSFER) {
                qemu_log_mask(LOG_UNIMP,
                          "%s: Rx Only/Tx Only are not supported\n", __func__);
        }

        if (val32 & R_COMMAND_CSAAT_MASK)  {
            qemu_log_mask(LOG_UNIMP,
                          "%s: CSAAT is not supported\n", __func__);
        }
        if (val32 & R_COMMAND_SPEED_MASK)  {
            qemu_log_mask(LOG_UNIMP,
                          "%s: SPEED is not supported\n", __func__);
        }

        /* Set Transfer Callback: the actual shifting happens in the timer */
        timer_mod(s->fifo_trigger_handle,
                    qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                    (TX_INTERRUPT_TRIGGER_DELAY_NS));

        break;
    case IBEX_SPI_HOST_TXDATA:
        /*
         * This is a hardware `feature` where
         * the first word written TXDATA after init is omitted entirely
         */
        if (s->init_status) {
            s->init_status = false;
            return;
        }

        for (int i = 0; i < 4; ++i) {
            /* Attempting to write when TXFULL */
            if (fifo8_is_full(&s->tx_fifo)) {
                /* Assert TXFULL, latch OVERFLOW and raise the error IRQ */
                s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXFULL_MASK;
                s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
                                                 R_ERROR_STATUS_OVERFLOW_MASK;
                ibex_spi_host_irq(s);
                return;
            }
            /* Byte ordering is set by the IP */
            if ((s->regs[IBEX_SPI_HOST_STATUS] &
                R_STATUS_BYTEORDER_MASK) == 0) {
                /* LE: LSB transmitted first (default for ibex processor) */
                shift_mask = 0xff << (i * 8);
            } else {
                /* BE: MSB transmitted first */
                qemu_log_mask(LOG_UNIMP,
                             "%s: Big endian is not supported\n", __func__);
            }

            fifo8_push(&s->tx_fifo, (val32 & shift_mask) >> (i * 8));
        }

        /* Reset TXEMPTY */
        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXEMPTY_MASK;
        /* Update TXQD (TXQD occupies the low byte of STATUS, shift is 0) */
        txqd_len = (s->regs[IBEX_SPI_HOST_STATUS] &
                    R_STATUS_TXQD_MASK) >> R_STATUS_TXQD_SHIFT;
        /* Partial bytes (size < 4) are padded, in words. */
        txqd_len += 1;
        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXQD_MASK;
        s->regs[IBEX_SPI_HOST_STATUS] |= txqd_len;
        /* Assert Ready */
        s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_READY_MASK;
        break;
    case IBEX_SPI_HOST_ERROR_ENABLE:
        s->regs[addr] = val32;

        if (val32 & R_ERROR_ENABLE_CMDINVAL_MASK)  {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Segment Length is not supported\n", __func__);
        }
        break;
    case IBEX_SPI_HOST_ERROR_STATUS:
    /*
     *  Indicates any errors that have occurred.
     *  When an error occurs, the corresponding bit must be cleared
     *  here before issuing any further commands.
     *  NOTE(review): spec marks this rw1c; the plain assignment below lets
     *  guests set error bits directly — confirm intended semantics.
     */
        s->regs[addr] = val32;
        break;
    case IBEX_SPI_HOST_EVENT_ENABLE:
    /* Controls which classes of SPI events raise an interrupt. */
        s->regs[addr] = val32;

        if (val32 & R_EVENT_ENABLE_RXWM_MASK)  {
            qemu_log_mask(LOG_UNIMP,
                          "%s: RXWM is not supported\n", __func__);
        }
        if (val32 & R_EVENT_ENABLE_TXWM_MASK)  {
            qemu_log_mask(LOG_UNIMP,
                          "%s: TXWM is not supported\n", __func__);
        }

        if (val32 & R_EVENT_ENABLE_IDLE_MASK)  {
            qemu_log_mask(LOG_UNIMP,
                          "%s: IDLE is not supported\n", __func__);
        }
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
                      addr << 2);
    }
}
519 
/* MMIO access handlers for the SPI host register window. */
static const MemoryRegionOps ibex_spi_ops = {
    .read = ibex_spi_host_read,
    .write = ibex_spi_host_write,
    /* Ibex default LE */
    .endianness = DEVICE_LITTLE_ENDIAN,
};
526 
static Property ibex_spi_properties[] = {
    /* Number of chip-select lines (sizes cs_lines and config_opts); default 1 */
    DEFINE_PROP_UINT32("num_cs", IbexSPIHostState, num_cs, 1),
    DEFINE_PROP_END_OF_LIST(),
};
531 
/*
 * Migration state.  Field order and version are ABI for migration streams;
 * do not reorder.  config_opts is a variable-length array whose element
 * count follows the "num_cs" property.
 */
static const VMStateDescription vmstate_ibex = {
    .name = TYPE_IBEX_SPI_HOST,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IbexSPIHostState, IBEX_SPI_HOST_MAX_REGS),
        VMSTATE_VARRAY_UINT32(config_opts, IbexSPIHostState,
                              num_cs, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_FIFO8(rx_fifo, IbexSPIHostState),
        VMSTATE_FIFO8(tx_fifo, IbexSPIHostState),
        VMSTATE_TIMER_PTR(fifo_trigger_handle, IbexSPIHostState),
        VMSTATE_BOOL(init_status, IbexSPIHostState),
        VMSTATE_END_OF_LIST()
    }
};
547 
548 static void fifo_trigger_update(void *opaque)
549 {
550     IbexSPIHostState *s = opaque;
551     ibex_spi_host_transfer(s);
552 }
553 
554 static void ibex_spi_host_realize(DeviceState *dev, Error **errp)
555 {
556     IbexSPIHostState *s = IBEX_SPI_HOST(dev);
557     int i;
558 
559     s->ssi = ssi_create_bus(dev, "ssi");
560     s->cs_lines = g_new0(qemu_irq, s->num_cs);
561 
562     for (i = 0; i < s->num_cs; ++i) {
563         sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cs_lines[i]);
564     }
565 
566     /* Setup CONFIGOPTS Multi-register */
567     s->config_opts = g_new0(uint32_t, s->num_cs);
568 
569     /* Setup FIFO Interrupt Timer */
570     s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL,
571                                           fifo_trigger_update, s);
572 
573     /* FIFO sizes as per OT Spec */
574     fifo8_create(&s->tx_fifo, IBEX_SPI_HOST_TXFIFO_LEN);
575     fifo8_create(&s->rx_fifo, IBEX_SPI_HOST_RXFIFO_LEN);
576 }
577 
578 static void ibex_spi_host_init(Object *obj)
579 {
580     IbexSPIHostState *s = IBEX_SPI_HOST(obj);
581 
582     sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->host_err);
583     sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->event);
584 
585     memory_region_init_io(&s->mmio, obj, &ibex_spi_ops, s,
586                           TYPE_IBEX_SPI_HOST, 0x1000);
587     sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
588 }
589 
590 static void ibex_spi_host_class_init(ObjectClass *klass, void *data)
591 {
592     DeviceClass *dc = DEVICE_CLASS(klass);
593     dc->realize = ibex_spi_host_realize;
594     dc->reset = ibex_spi_host_reset;
595     dc->vmsd = &vmstate_ibex;
596     device_class_set_props(dc, ibex_spi_properties);
597 }
598 
/* QOM type registration data for the Ibex SPI host sysbus device. */
static const TypeInfo ibex_spi_host_info = {
    .name          = TYPE_IBEX_SPI_HOST,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IbexSPIHostState),
    .instance_init = ibex_spi_host_init,
    .class_init    = ibex_spi_host_class_init,
};
606 
/* Register the device type with QOM at startup. */
static void ibex_spi_host_register_types(void)
{
    type_register_static(&ibex_spi_host_info);
}

type_init(ibex_spi_host_register_types)
613