xref: /openbmc/qemu/hw/ssi/pnv_spi.c (revision bb44dc48)
1 /*
2  * QEMU PowerPC SPI model
3  *
4  * Copyright (c) 2024, IBM Corporation.
5  *
6  * SPDX-License-Identifier: GPL-2.0-or-later
7  */
8 
9 #include "qemu/osdep.h"
10 #include "qemu/log.h"
11 #include "hw/qdev-properties.h"
12 #include "hw/ppc/pnv_xscom.h"
13 #include "hw/ssi/pnv_spi.h"
14 #include "hw/ssi/pnv_spi_regs.h"
15 #include "hw/ssi/ssi.h"
16 #include <libfdt.h>
17 #include "hw/irq.h"
18 #include "trace.h"
19 
/*
 * Opcode helpers: the low nibble carries the shift count (M), the high
 * nibble identifies the operation class.
 * Fix: arguments are fully parenthesized so expansion is correct when the
 * caller passes a compound expression (e.g. "a | b"), where the previous
 * form mis-bound because "&" has lower precedence than "|".
 */
#define PNV_SPI_OPCODE_LO_NIBBLE(x) ((x) & 0x0F)
#define PNV_SPI_MASKED_OPCODE(x) ((x) & 0xF0)
22 
/*
 * Macro from include/hw/ppc/fdt.h
 * fdt.h cannot be included here as it contain ppc target specific dependency.
 *
 * Evaluates exp exactly once; if the libfdt call returns a negative
 * error code the error is logged (with the stringized expression and
 * fdt_strerror() text) and QEMU exits.  Wrapped in do/while (0) so it
 * behaves as a single statement in all contexts.
 */
#define _FDT(exp)                                                  \
    do {                                                           \
        int _ret = (exp);                                          \
        if (_ret < 0) {                                            \
            qemu_log_mask(LOG_GUEST_ERROR,                         \
                    "error creating device tree: %s: %s",          \
                    #exp, fdt_strerror(_ret));                     \
            exit(1);                                               \
        }                                                          \
    } while (0)
37 
/*
 * PnvXferBuffer: growable byte buffer holding the payload of one SPI
 * frame.  Grown on demand by pnv_spi_xfer_buffer_write_ptr().
 */
typedef struct PnvXferBuffer {

    uint32_t    len;    /* number of valid bytes currently in data */
    uint8_t    *data;   /* heap storage (g_realloc'd); NULL when empty */

} PnvXferBuffer;
45 
46 /* pnv_spi_xfer_buffer_methods */
pnv_spi_xfer_buffer_new(void)47 static PnvXferBuffer *pnv_spi_xfer_buffer_new(void)
48 {
49     PnvXferBuffer *payload = g_malloc0(sizeof(*payload));
50 
51     return payload;
52 }
53 
/*
 * Release a buffer created by pnv_spi_xfer_buffer_new().
 *
 * Fix: the buffer and its data are allocated with g_malloc0()/g_realloc(),
 * so they must be released with g_free(), not libc free() — mixing the
 * glib and libc allocators is invalid.  g_free(NULL) is a no-op, so a
 * buffer whose data was never grown is handled safely.
 */
static void pnv_spi_xfer_buffer_free(PnvXferBuffer *payload)
{
    g_free(payload->data);
    g_free(payload);
}
59 
pnv_spi_xfer_buffer_write_ptr(PnvXferBuffer * payload,uint32_t offset,uint32_t length)60 static uint8_t *pnv_spi_xfer_buffer_write_ptr(PnvXferBuffer *payload,
61                 uint32_t offset, uint32_t length)
62 {
63     if (payload->len < (offset + length)) {
64         payload->len = offset + length;
65         payload->data = g_realloc(payload->data, payload->len);
66     }
67     return &payload->data[offset];
68 }
69 
does_rdr_match(PnvSpi * s)70 static bool does_rdr_match(PnvSpi *s)
71 {
72     /*
73      * According to spec, the mask bits that are 0 are compared and the
74      * bits that are 1 are ignored.
75      */
76     uint16_t rdr_match_mask = GETFIELD(SPI_MM_RDR_MATCH_MASK,
77                                         s->regs[SPI_MM_REG]);
78     uint16_t rdr_match_val = GETFIELD(SPI_MM_RDR_MATCH_VAL,
79                                         s->regs[SPI_MM_REG]);
80 
81     if ((~rdr_match_mask & rdr_match_val) == ((~rdr_match_mask) &
82             GETFIELD(PPC_BITMASK(48, 63), s->regs[SPI_RCV_DATA_REG]))) {
83         return true;
84     }
85     return false;
86 }
87 
/*
 * Fetch one byte from the TDR (transmit data register).  The TDR holds
 * PNV_SPI_REG_SIZE bytes, most-significant byte first, so offset 0 maps
 * to bits 63:56 of the register.
 *
 * An out-of-range offset is a guest error: it is logged and 0xFF is
 * returned as a harmless fill value.
 */
static uint8_t get_from_offset(PnvSpi *s, uint8_t offset)
{
    /* Offset must index within the PNV_SPI_REG_SIZE byte register */
    if (offset >= PNV_SPI_REG_SIZE) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid offset = %d used to get byte "
                      "from TDR\n", offset);
        return 0xff;
    }
    return (s->regs[SPI_XMIT_DATA_REG] >> (56 - offset * 8)) & 0xFF;
}
109 
/*
 * Shift nr_bytes of received frame data from read_buf into the RDR.
 * When ecc_count is non-zero, every (PNV_SPI_REG_SIZE + ecc_count)-th
 * byte shifted in is an ECC byte and is discarded instead of being
 * loaded into the RDR.  Returns the running shift-in counter so a
 * subsequent call (e.g. for the N2 segment) continues the ECC cadence.
 */
static uint8_t read_from_frame(PnvSpi *s, uint8_t *read_buf, uint8_t nr_bytes,
                uint8_t ecc_count, uint8_t shift_in_count)
{
    for (int i = 0; i < nr_bytes; i++) {
        shift_in_count++;
        if ((ecc_count != 0) &&
            (shift_in_count == (PNV_SPI_REG_SIZE + ecc_count))) {
            /* ECC byte position: discard and restart the cadence */
            shift_in_count = 0;
        } else {
            uint8_t data_byte = read_buf[i];
            trace_pnv_spi_shift_rx(data_byte, i);
            s->regs[SPI_RCV_DATA_REG] =
                    (s->regs[SPI_RCV_DATA_REG] << 8) | data_byte;
        }
    }
    return shift_in_count;
}
130 
/*
 * Consume a response payload: move received bytes into the RDR per the
 * N1_rx/N2_rx counts and update the RDR full/overrun status bits.
 *
 * NOTE(review): the 'bits' parameter is not referenced in this function;
 * callers pass s->N1_bits — presumably reserved for partial-byte
 * handling.  Confirm before removing.
 */
static void spi_response(PnvSpi *s, int bits, PnvXferBuffer *rsp_payload)
{
    uint8_t ecc_count;
    uint8_t shift_in_count;

    /*
     * Processing here must handle:
     * - Which bytes in the payload we should move to the RDR
     * - Explicit mode counter configuration settings
     * - RDR full and RDR overrun status
     */

    /*
     * First check that the response payload is the exact same
     * number of bytes as the request payload was
     */
    if (rsp_payload->len != (s->N1_bytes + s->N2_bytes)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid response payload size in "
                       "bytes, expected %d, got %d\n",
                       (s->N1_bytes + s->N2_bytes), rsp_payload->len);
    } else {
        uint8_t ecc_control;
        trace_pnv_spi_rx_received(rsp_payload->len);
        trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                        s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
        /*
         * Adding an ECC count let's us know when we have found a payload byte
         * that was shifted in but cannot be loaded into RDR.  Bits 29-30 of
         * clock_config_reset_control register equal to either 0b00 or 0b10
         * indicate that we are taking in data with ECC and either applying
         * the ECC or discarding it.
         */
        ecc_count = 0;
        ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]);
        if (ecc_control == 0 || ecc_control == 2) {
            ecc_count = 1;
        }
        /*
         * Use the N1_rx and N2_rx counts to control shifting data from the
         * payload into the RDR.  Keep an overall count of the number of bytes
         * shifted into RDR so we can discard every 9th byte when ECC is
         * enabled.
         */
        shift_in_count = 0;
        /* Handle the N1 portion of the frame first */
        if (s->N1_rx != 0) {
            trace_pnv_spi_rx_read_N1frame();
            shift_in_count = read_from_frame(s, &rsp_payload->data[0],
                            s->N1_bytes, ecc_count, shift_in_count);
        }
        /* Handle the N2 portion of the frame */
        if (s->N2_rx != 0) {
            trace_pnv_spi_rx_read_N2frame();
            /* Continue the ECC cadence from where the N1 segment left off */
            shift_in_count = read_from_frame(s,
                            &rsp_payload->data[s->N1_bytes], s->N2_bytes,
                            ecc_count, shift_in_count);
        }
        if ((s->N1_rx + s->N2_rx) > 0) {
            /*
             * Data was received so handle RDR status.
             * It is easier to handle RDR_full and RDR_overrun status here
             * since the RDR register's shift_byte_in method is called
             * multiple times in a row. Controlling RDR status is done here
             * instead of in the RDR scoped methods for that reason.
             */
            if (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1) {
                /*
                 * Data was shifted into the RDR before having been read
                 * causing previous data to have been overrun.
                 */
                s->status = SETFIELD(SPI_STS_RDR_OVERRUN, s->status, 1);
            } else {
                /*
                 * Set status to indicate that the received data register is
                 * full. This flag is only cleared once the RDR is unloaded.
                 */
                s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 1);
            }
        }
    } /* end of else */
} /* end of spi_response() */
212 
/*
 * Shift the request payload out on the SSI bus in transfer_len sized
 * words (MSB first, zero-padded past the end of the payload), collect
 * the response bytes into a buffer of the same length and pass it to
 * spi_response().
 *
 * Fix: the response buffer was allocated on every call and never
 * released (a leak on each transfer); it is now freed after
 * spi_response() has consumed it.  The dead "rsp_payload != NULL"
 * check (it was always non-NULL) is removed.
 */
static void transfer(PnvSpi *s, PnvXferBuffer *payload)
{
    uint32_t tx;
    uint32_t rx;
    PnvXferBuffer *rsp_payload = pnv_spi_xfer_buffer_new();

    for (int offset = 0; offset < payload->len; offset += s->transfer_len) {
        tx = 0;
        /* Pack up to transfer_len payload bytes, MSB first, into tx */
        for (int i = 0; i < s->transfer_len; i++) {
            if ((offset + i) >= payload->len) {
                tx <<= 8;
            } else {
                tx = (tx << 8) | payload->data[offset + i];
            }
        }
        rx = ssi_transfer(s->ssi_bus, tx);
        /* Unpack rx, appending one response byte per payload byte sent */
        for (int i = 0; i < s->transfer_len; i++) {
            if ((offset + i) >= payload->len) {
                break;
            }
            *(pnv_spi_xfer_buffer_write_ptr(rsp_payload, rsp_payload->len, 1)) =
                    (rx >> (8 * (s->transfer_len - 1) - i * 8)) & 0xFF;
        }
    }
    spi_response(s, s->N1_bits, rsp_payload);
    pnv_spi_xfer_buffer_free(rsp_payload);
}
242 
get_seq_index(PnvSpi * s)243 static inline uint8_t get_seq_index(PnvSpi *s)
244 {
245     return GETFIELD(SPI_STS_SEQ_INDEX, s->status);
246 }
247 
/*
 * Advance the sequencer: bump the operation index in the status register
 * and mark the sequencer FSM as being in the index-increment state.
 */
static inline void next_sequencer_fsm(PnvSpi *s)
{
    uint8_t next_index = get_seq_index(s) + 1;

    s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, next_index);
    s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT);
}
254 
/*
 * Calculate the N1 counters based on passed in opcode and
 * internal register values.
 * The method assumes that the opcode is a Shift_N1 opcode
 * and doesn't test it.
 * The counters returned are:
 * N1 bits: Number of bits in the payload data that are significant
 * to the responder.
 * N1_bytes: Total count of payload bytes for the N1 (portion of the) frame.
 * N1_tx: Total number of bytes taken from TDR for N1
 * N1_rx: Total number of bytes taken from the payload for N1
 */
static void calculate_N1(PnvSpi *s, uint8_t opcode)
{
    /*
     * Shift_N1 opcode form: 0x3M
     * Implicit mode:
     * If M != 0 the shift count is M bytes and M is the number of tx bytes.
     * Forced Implicit mode:
     * M is the shift count but tx and rx is determined by the count control
     * register fields.  Note that we only check for forced Implicit mode when
     * M != 0 since the mode doesn't make sense when M = 0.
     * Explicit mode:
     * If M == 0 then shift count is number of bits defined in the
     * Counter Configuration Register's shift_count_N1 field.
     */
    if (PNV_SPI_OPCODE_LO_NIBBLE(opcode) == 0) {
        /* Explicit mode */
        s->N1_bits = GETFIELD(SPI_CTR_CFG_N1, s->regs[SPI_CTR_CFG_REG]);
        /* Round the bit count up to whole bytes */
        s->N1_bytes = (s->N1_bits + 7) / 8;
        s->N1_tx = 0;
        s->N1_rx = 0;
        /* If tx count control for N1 is set, load the tx value */
        if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N1_tx = s->N1_bytes;
        }
        /* If rx count control for N1 is set, load the rx value */
        if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N1_rx = s->N1_bytes;
        }
    } else {
        /* Implicit mode/Forced Implicit mode, use M field from opcode */
        s->N1_bytes = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
        s->N1_bits = s->N1_bytes * 8;
        /*
         * Assume that we are going to transmit the count
         * (pure Implicit only)
         */
        s->N1_tx = s->N1_bytes;
        s->N1_rx = 0;
        /* Let Forced Implicit mode have an effect on the counts */
        if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B1, s->regs[SPI_CTR_CFG_REG]) == 1) {
            /*
             * If Forced Implicit mode and count control doesn't
             * indicate transmit then reset the tx count to 0
             */
            if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2,
                                    s->regs[SPI_CTR_CFG_REG]) == 0) {
                s->N1_tx = 0;
            }
            /* If rx count control for N1 is set, load the rx value */
            if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3,
                                    s->regs[SPI_CTR_CFG_REG]) == 1) {
                s->N1_rx = s->N1_bytes;
            }
        }
    }
    /*
     * Enforce an upper limit on the size of N1 that is equal to the known size
     * of the shift register, 64 bits or 72 bits if ECC is enabled.
     * If the size exceeds 72 bits it is a user error so log an error,
     * cap the size at a max of 64 bits or 72 bits and set the sequencer FSM
     * error bit.
     * NOTE(review): despite the comment above, no FSM error bit is set in
     * this function — only the log + cap happen.  Confirm whether the
     * error bit should be raised here.
     */
    uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL,
                                   s->regs[SPI_CLK_CFG_REG]);
    if (ecc_control == 0 || ecc_control == 2) {
        if (s->N1_bytes > (PNV_SPI_REG_SIZE + 1)) {
            qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size when "
                          "ECC enabled, bytes = 0x%x, bits = 0x%x\n",
                          s->N1_bytes, s->N1_bits);
            s->N1_bytes = PNV_SPI_REG_SIZE + 1;
            s->N1_bits = s->N1_bytes * 8;
        }
    } else if (s->N1_bytes > PNV_SPI_REG_SIZE) {
        qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size, "
                      "bytes = 0x%x, bits = 0x%x\n",
                      s->N1_bytes, s->N1_bits);
        s->N1_bytes = PNV_SPI_REG_SIZE;
        s->N1_bits = s->N1_bytes * 8;
    }
} /* end of calculate_N1 */
347 
/*
 * Shift_N1 operation handler method
 *
 * Builds the N1 portion of the transmit payload from the TDR (or 0xFF
 * filler), optionally performs the bus transfer when send_n1_alone is
 * true, and returns true when the sequencer must stop and wait for the
 * guest (TDR empty on TX, RDR full on RX, or a pending N2 pacing write).
 */
static bool operation_shiftn1(PnvSpi *s, uint8_t opcode,
                       PnvXferBuffer **payload, bool send_n1_alone)
{
    uint8_t n1_count;
    bool stop = false;

    /*
     * If there isn't a current payload left over from a stopped sequence
     * create a new one.
     */
    if (*payload == NULL) {
        *payload = pnv_spi_xfer_buffer_new();
    }
    /*
     * Use a combination of N1 counters to build the N1 portion of the
     * transmit payload.
     * We only care about transmit at this time since the request payload
     * only represents data going out on the controller output line.
     * Leave mode specific considerations in the calculate function since
     * all we really care about are counters that tell use exactly how
     * many bytes are in the payload and how many of those bytes to
     * include from the TDR into the payload.
     */
    calculate_N1(s, opcode);
    trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                    s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
    /*
     * Zero out the N2 counters here in case there is no N2 operation following
     * the N1 operation in the sequencer.  This keeps leftover N2 information
     * from interfering with spi_response logic.
     */
    s->N2_bits = 0;
    s->N2_bytes = 0;
    s->N2_tx = 0;
    s->N2_rx = 0;
    /*
     * N1_bytes is the overall size of the N1 portion of the frame regardless of
     * whether N1 is used for tx, rx or both.  Loop over the size to build a
     * payload that is N1_bytes long.
     * N1_tx is the count of bytes to take from the TDR and "shift" into the
     * frame which means append those bytes to the payload for the N1 portion
     * of the frame.
     * If N1_tx is 0 or if the count exceeds the size of the TDR append 0xFF to
     * the frame until the overall N1 count is reached.
     */
    n1_count = 0;
    while (n1_count < s->N1_bytes) {
        /*
         * Assuming that if N1_tx is not equal to 0 then it is the same as
         * N1_bytes.
         */
        if ((s->N1_tx != 0) && (n1_count < PNV_SPI_REG_SIZE)) {

            if (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1) {
                /*
                 * Note that we are only appending to the payload IF the TDR
                 * is full otherwise we don't touch the payload because we are
                 * going to NOT send the payload and instead tell the sequencer
                 * that called us to stop and wait for a TDR write so we have
                 * data to load into the payload.
                 */
                uint8_t n1_byte = 0x00;
                n1_byte = get_from_offset(s, n1_count);
                trace_pnv_spi_tx_append("n1_byte", n1_byte, n1_count);
                *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1)) =
                        n1_byte;
            } else {
                /*
                 * We hit a shift_n1 opcode TX but the TDR is empty, tell the
                 * sequencer to stop and break this loop.
                 */
                trace_pnv_spi_sequencer_stop_requested("Shift N1"
                                "set for transmit but TDR is empty");
                stop = true;
                break;
            }
        } else {
            /*
             * Cases here:
             * - we are receiving during the N1 frame segment and the RDR
             *   is full so we need to stop until the RDR is read
             * - we are transmitting and we don't care about RDR status
             *   since we won't be loading RDR during the frame segment.
             * - we are receiving and the RDR is empty so we allow the operation
             *   to proceed.
             */
            if ((s->N1_rx != 0) && (GETFIELD(SPI_STS_RDR_FULL,
                                           s->status) == 1)) {
                trace_pnv_spi_sequencer_stop_requested("shift N1"
                                "set for receive but RDR is full");
                stop = true;
                break;
            } else {
                trace_pnv_spi_tx_append_FF("n1_byte");
                *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
                        = 0xff;
            }
        }
        n1_count++;
    } /* end of while */
    /*
     * If we are not stopping due to an empty TDR and we are doing an N1 TX
     * and the TDR is full we need to clear the TDR_full status.
     * Do this here instead of up in the loop above so we don't log the message
     * in every loop iteration.
     * Ignore the send_n1_alone flag, all that does is defer the TX until the N2
     * operation, which was found immediately after the current opcode.  The TDR
     * was unloaded and will be shifted so we have to clear the TDR_full status.
     */
    if (!stop && (s->N1_tx != 0) &&
        (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
        s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
    }
    /*
     * There are other reasons why the shifter would stop, such as a TDR empty
     * or RDR full condition with N1 set to receive.  If we haven't stopped due
     * to either one of those conditions then check if the send_n1_alone flag is
     * equal to False, indicating the next opcode is an N2 operation, AND if
     * the N2 counter reload switch (bit 0 of the N2 count control field) is
     * set.  This condition requires a pacing write to "kick" off the N2
     * shift which includes the N1 shift as well when send_n1_alone is False.
     */
    if (!stop && !send_n1_alone &&
       (GETFIELD(SPI_CTR_CFG_N2_CTRL_B0, s->regs[SPI_CTR_CFG_REG]) == 1)) {
        trace_pnv_spi_sequencer_stop_requested("N2 counter reload "
                        "active, stop N1 shift, TDR_underrun set to 1");
        stop = true;
        s->status = SETFIELD(SPI_STS_TDR_UNDERRUN, s->status, 1);
    }
    /*
     * If send_n1_alone is set AND we have a full TDR then this is the first and
     * last payload to send and we don't have an N2 frame segment to add to the
     * payload.
     */
    if (send_n1_alone && !stop) {
        /* We have a TX and a full TDR or an RX and an empty RDR */
        trace_pnv_spi_tx_request("Shifting N1 frame", (*payload)->len);
        transfer(s, *payload);
        /*
         * The N1 frame shift is complete so clear the counters.
         * NOTE(review): the original comment said "reset the N1 counters"
         * but the code zeroes the N2 counters (again — they were already
         * zeroed above); the N1 counts are left intact for spi_response.
         * Confirm whether N1 should also be cleared here.
         */
        s->N2_bits = 0;
        s->N2_bytes = 0;
        s->N2_tx = 0;
        s->N2_rx = 0;
        pnv_spi_xfer_buffer_free(*payload);
        *payload = NULL;
    }
    return stop;
} /* end of operation_shiftn1() */
499 
/*
 * Calculate the N2 counters based on passed in opcode and
 * internal register values.
 * The method assumes that the opcode is a Shift_N2 opcode
 * and doesn't test it.
 * The counters returned are:
 * N2 bits: Number of bits in the payload data that are significant
 * to the responder.
 * N2_bytes: Total count of payload bytes for the N2 frame.
 * N2_tx: Total number of bytes taken from TDR for N2
 * N2_rx: Total number of bytes taken from the payload for N2
 */
static void calculate_N2(PnvSpi *s, uint8_t opcode)
{
    /*
     * Shift_N2 opcode form: 0x4M
     * Implicit mode:
     * If M!=0 the shift count is M bytes and M is the number of rx bytes.
     * Forced Implicit mode:
     * M is the shift count but tx and rx is determined by the count control
     * register fields.  Note that we only check for Forced Implicit mode when
     * M != 0 since the mode doesn't make sense when M = 0.
     * Explicit mode:
     * If M==0 then shift count is number of bits defined in the
     * Counter Configuration Register's shift_count_N2 field.
     */
    if (PNV_SPI_OPCODE_LO_NIBBLE(opcode) == 0) {
        /* Explicit mode */
        s->N2_bits = GETFIELD(SPI_CTR_CFG_N2, s->regs[SPI_CTR_CFG_REG]);
        /* Round the bit count up to whole bytes */
        s->N2_bytes = (s->N2_bits + 7) / 8;
        s->N2_tx = 0;
        s->N2_rx = 0;
        /* If tx count control for N2 is set, load the tx value */
        if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N2_tx = s->N2_bytes;
        }
        /* If rx count control for N2 is set, load the rx value */
        if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N2_rx = s->N2_bytes;
        }
    } else {
        /* Implicit mode/Forced Implicit mode, use M field from opcode */
        s->N2_bytes = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
        s->N2_bits = s->N2_bytes * 8;
        /* Assume that we are going to receive the count */
        s->N2_rx = s->N2_bytes;
        s->N2_tx = 0;
        /* Let Forced Implicit mode have an effect on the counts */
        if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B1, s->regs[SPI_CTR_CFG_REG]) == 1) {
            /*
             * If Forced Implicit mode and count control doesn't
             * indicate a receive then reset the rx count to 0
             */
            if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3,
                                    s->regs[SPI_CTR_CFG_REG]) == 0) {
                s->N2_rx = 0;
            }
            /* If tx count control for N2 is set, load the tx value */
            if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2,
                                    s->regs[SPI_CTR_CFG_REG]) == 1) {
                s->N2_tx = s->N2_bytes;
            }
        }
    }
    /*
     * Enforce an upper limit on the size of N2 that is equal to the
     * known size of the shift register, 64 bits or 72 bits if ECC
     * is enabled.
     * If the size exceeds 72 bits it is a user error so cap the size at
     * a max of 64 bits or 72 bits.
     * NOTE(review): unlike calculate_N1 this path neither logs the guest
     * error nor sets any error bit — confirm whether that asymmetry is
     * intentional.
     */
    uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL,
                    s->regs[SPI_CLK_CFG_REG]);
    if (ecc_control == 0 || ecc_control == 2) {
        if (s->N2_bytes > (PNV_SPI_REG_SIZE + 1)) {
            /* Unsupported N2 shift size when ECC enabled */
            s->N2_bytes = PNV_SPI_REG_SIZE + 1;
            s->N2_bits = s->N2_bytes * 8;
        }
    } else if (s->N2_bytes > PNV_SPI_REG_SIZE) {
        /* Unsupported N2 shift size */
        s->N2_bytes = PNV_SPI_REG_SIZE;
        s->N2_bits = s->N2_bytes * 8;
    }
} /* end of calculate_N2 */
586 
/*
 * Shift_N2 operation handler method
 *
 * Appends the N2 frame segment to the (possibly N1-prefixed) payload,
 * performs the bus transfer, and clears the TDR_full status plus all
 * N1/N2 counters on completion.  Returns true when the sequencer must
 * stop and wait for the guest to unload a full RDR.
 */

static bool operation_shiftn2(PnvSpi *s, uint8_t opcode,
                       PnvXferBuffer **payload)
{
    uint8_t n2_count;
    bool stop = false;

    /*
     * If there isn't a current payload left over from a stopped sequence
     * create a new one.
     */
    if (*payload == NULL) {
        *payload = pnv_spi_xfer_buffer_new();
    }
    /*
     * Use a combination of N2 counters to build the N2 portion of the
     * transmit payload.
     */
    calculate_N2(s, opcode);
    trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                    s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
    /*
     * The only difference between this code and the code for shift N1 is
     * that this code has to account for the possible presence of N1 transmit
     * bytes already taken from the TDR.
     * If there are bytes to be transmitted for the N2 portion of the frame
     * and there are still bytes in TDR that have not been copied into the
     * TX data of the payload, this code will handle transmitting those
     * remaining bytes.
     * If for some reason the transmit count(s) add up to more than the size
     * of the TDR we will just append 0xFF to the transmit payload data until
     * the payload is N1 + N2 bytes long.
     */
    n2_count = 0;
    while (n2_count < s->N2_bytes) {
        /*
         * If the RDR is full and we need to RX just bail out, letting the
         * code continue will end up building the payload twice in the same
         * buffer since RDR full causes a sequence stop and restart.
         */
        if ((s->N2_rx != 0) &&
            (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) {
            trace_pnv_spi_sequencer_stop_requested("shift N2 set"
                            "for receive but RDR is full");
            stop = true;
            break;
        }
        if ((s->N2_tx != 0) && ((s->N1_tx + n2_count) <
                                PNV_SPI_REG_SIZE)) {
            /* Always append data for the N2 segment if it is set for TX */
            uint8_t n2_byte = 0x00;
            /* TDR bytes continue after the N1_tx bytes already consumed */
            n2_byte = get_from_offset(s, (s->N1_tx + n2_count));
            trace_pnv_spi_tx_append("n2_byte", n2_byte, (s->N1_tx + n2_count));
            *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
                    = n2_byte;
        } else {
            /*
             * Regardless of whether or not N2 is set for TX or RX, we need
             * the number of bytes in the payload to match the overall length
             * of the operation.
             */
            trace_pnv_spi_tx_append_FF("n2_byte");
            *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
                    = 0xff;
        }
        n2_count++;
    } /* end of while */
    if (!stop) {
        /* We have a TX and a full TDR or an RX and an empty RDR */
        trace_pnv_spi_tx_request("Shifting N2 frame", (*payload)->len);
        transfer(s, *payload);
        /*
         * If we are doing an N2 TX and the TDR is full we need to clear the
         * TDR_full status. Do this here instead of up in the loop above so we
         * don't log the message in every loop iteration.
         */
        if ((s->N2_tx != 0) &&
            (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
            s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
        }
        /*
         * The N2 frame shift is complete so reset the N2 counters.
         * Reset the N1 counters also in case the frame was a combination of
         * N1 and N2 segments.
         */
        s->N2_bits = 0;
        s->N2_bytes = 0;
        s->N2_tx = 0;
        s->N2_rx = 0;
        s->N1_bits = 0;
        s->N1_bytes = 0;
        s->N1_tx = 0;
        s->N1_rx = 0;
        pnv_spi_xfer_buffer_free(*payload);
        *payload = NULL;
    }
    return stop;
} /*  end of operation_shiftn2()*/
688 
/*
 * Run the SPI sequencer.
 *
 * Walks the eight opcodes programmed into the sequencer operation register,
 * decoding and executing each one, until a STOP opcode executes, all eight
 * opcodes are consumed, or an operation requests a stop (e.g. to wait for
 * the guest to unload the RDR or refill the TDR).  Invoked on TDR write and
 * on RDR read while the shifter FSM is in FSM_WAIT.
 */
static void operation_sequencer(PnvSpi *s)
{
    /*
     * Loop through each sequencer operation ID and perform the requested
     *  operations.
     * Flag for indicating if we should send the N1 frame or wait to combine
     * it with a preceding N2 frame.
     */
    bool send_n1_alone = true;
    bool stop = false; /* Flag to stop the sequencer */
    uint8_t opcode = 0;
    uint8_t masked_opcode = 0;

    /*
     * PnvXferBuffer for containing the payload of the SPI frame.
     * This is a static because there are cases where a sequence has to stop
     * and wait for the target application to unload the RDR.  If this occurs
     * during a sequence where N1 is not sent alone and instead combined with
     * N2 since the N1 tx length + the N2 tx length is less than the size of
     * the TDR.
     * NOTE(review): a function-local static is shared by every PnvSpi
     * instance; if more than one SPI engine can be mid-sequence at once this
     * should move into the device state -- confirm.
     */
    static PnvXferBuffer *payload;

    if (payload == NULL) {
        payload = pnv_spi_xfer_buffer_new();
    }
    /*
     * Clear the sequencer FSM error bit - general_SPI_status[3]
     * before starting a sequence.
     */
    s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 0);
    /*
     * If the FSM is idle set the sequencer index to 0
     * (new/restarted sequence)
     */
    if (GETFIELD(SPI_STS_SEQ_FSM, s->status) == SEQ_STATE_IDLE) {
        s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0);
    }
    /*
     * There are only 8 possible operation IDs to iterate through though
     * some operations may cause more than one frame to be sequenced.
     */
    while (get_seq_index(s) < NUM_SEQ_OPS) {
        opcode = s->seq_op[get_seq_index(s)];
        /* Set sequencer state to decode */
        s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_DECODE);
        /*
         * Only the upper nibble of the operation ID is needed to know what
         * kind of operation is requested.
         */
        masked_opcode = PNV_SPI_MASKED_OPCODE(opcode);
        switch (masked_opcode) {
        /*
         * Increment the operation index in each case instead of just
         * once at the end in case an operation like the branch
         * operation needs to change the index.
         */
        case SEQ_OP_STOP:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            /* A stop operation in any position stops the sequencer */
            trace_pnv_spi_sequencer_op("STOP", get_seq_index(s));

            stop = true;
            s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
            s->loop_counter_1 = 0;
            s->loop_counter_2 = 0;
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
            break;

        case SEQ_OP_SELECT_SLAVE:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            trace_pnv_spi_sequencer_op("SELECT_SLAVE", get_seq_index(s));
            /*
             * This device currently only supports a single responder
             * connection at position 0.  De-selecting a responder is fine
             * and expected at the end of a sequence but selecting any
             * responder other than 0 should cause an error.
             */
            s->responder_select = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
            if (s->responder_select == 0) {
                /* De-select: raise chip select and mark the shifter done */
                trace_pnv_spi_shifter_done();
                qemu_set_irq(s->cs_line[0], 1);
                s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
                                (get_seq_index(s) + 1));
                s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_DONE);
            } else if (s->responder_select != 1) {
                qemu_log_mask(LOG_GUEST_ERROR, "Slave selection other than 1 "
                              "not supported, select = 0x%x\n",
                               s->responder_select);
                trace_pnv_spi_sequencer_stop_requested("invalid "
                                "responder select");
                s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
                stop = true;
            } else {
                /*
                 * Only allow an FSM_START state when a responder is
                 * selected
                 */
                s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_START);
                trace_pnv_spi_shifter_stating();
                qemu_set_irq(s->cs_line[0], 0);
                /*
                 * A Shift_N2 operation is only valid after a Shift_N1
                 * according to the spec. The spec doesn't say if that means
                 * immediately after or just after at any point. We will track
                 * the occurrence of a Shift_N1 to enforce this requirement in
                 * the most generic way possible by assuming that the rule
                 * applies once a valid responder select has occurred.
                 */
                s->shift_n1_done = false;
                next_sequencer_fsm(s);
            }
            break;

        case SEQ_OP_SHIFT_N1:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            trace_pnv_spi_sequencer_op("SHIFT_N1", get_seq_index(s));
            /*
             * Only allow a shift_n1 when the state is not IDLE or DONE.
             * In either of those two cases the sequencer is not in a proper
             * state to perform shift operations because the sequencer has:
             * - processed a responder deselect (DONE)
             * - processed a stop opcode (IDLE)
             * - encountered an error (IDLE)
             */
            if ((GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_IDLE) ||
                (GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_DONE)) {
                /* GETFIELD yields a uint64_t, use PRIx64 for it */
                qemu_log_mask(LOG_GUEST_ERROR, "Shift_N1 not allowed in "
                              "shifter state = 0x%" PRIx64 "\n",
                              GETFIELD(SPI_STS_SHIFTER_FSM, s->status));
                /*
                 * Set sequencer FSM error bit 3 (general_SPI_status[3])
                 * in status reg.
                 */
                s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
                trace_pnv_spi_sequencer_stop_requested("invalid shifter state");
                stop = true;
            } else {
                /*
                 * Look for the special case where there is a shift_n1 set for
                 * transmit and it is followed by a shift_n2 set for transmit
                 * AND the combined transmit length of the two operations is
                 * less than or equal to the size of the TDR register. In this
                 * case we want to use both this current shift_n1 opcode and the
                 * following shift_n2 opcode to assemble the frame for
                 * transmission to the responder without requiring a refill of
                 * the TDR between the two operations.
                 * Bound the peek at the next opcode so we never read past the
                 * end of the seq_op[] array when this is the last index.
                 */
                if ((get_seq_index(s) < (NUM_SEQ_OPS - 1)) &&
                    (PNV_SPI_MASKED_OPCODE(s->seq_op[get_seq_index(s) + 1])
                                == SEQ_OP_SHIFT_N2)) {
                    send_n1_alone = false;
                }
                s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
                                FSM_SHIFT_N1);
                stop = operation_shiftn1(s, opcode, &payload, send_n1_alone);
                if (stop) {
                    /*
                     *  The operation code says to stop, this can occur if:
                     * (1) RDR is full and the N1 shift is set for receive
                     * (2) TDR was empty at the time of the N1 shift so we need
                     * to wait for data.
                     * (3) Neither 1 nor 2 are occurring and we aren't sending
                     * N1 alone and N2 counter reload is set (bit 0 of the N2
                     * counter reload field).  In this case TDR_underrun will
                     * will be set and the Payload has been loaded so it is
                     * ok to advance the sequencer.
                     */
                    if (GETFIELD(SPI_STS_TDR_UNDERRUN, s->status)) {
                        s->shift_n1_done = true;
                        s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
                                                  FSM_SHIFT_N2);
                        s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
                                        (get_seq_index(s) + 1));
                    } else {
                        /*
                         * This is case (1) or (2) so the sequencer needs to
                         * wait and NOT go to the next sequence yet.
                         */
                        s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
                                        FSM_WAIT);
                    }
                } else {
                    /* Ok to move on to the next index */
                    s->shift_n1_done = true;
                    next_sequencer_fsm(s);
                }
            }
            break;

        case SEQ_OP_SHIFT_N2:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            trace_pnv_spi_sequencer_op("SHIFT_N2", get_seq_index(s));
            if (!s->shift_n1_done) {
                /* GETFIELD yields a uint64_t, use PRIx64 for it */
                qemu_log_mask(LOG_GUEST_ERROR, "Shift_N2 is not allowed if a "
                              "Shift_N1 is not done, shifter state = 0x%"
                              PRIx64 "\n",
                              GETFIELD(SPI_STS_SHIFTER_FSM, s->status));
                /*
                 * In case the sequencer actually stops if an N2 shift is
                 * requested before any N1 shift is done. Set sequencer FSM
                 * error bit 3 (general_SPI_status[3]) in status reg.
                 */
                s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
                trace_pnv_spi_sequencer_stop_requested("shift_n2 "
                                    "w/no shift_n1 done");
                stop = true;
            } else {
                /* Ok to do a Shift_N2 */
                s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
                                FSM_SHIFT_N2);
                stop = operation_shiftn2(s, opcode, &payload);
                /*
                 * If the operation code says to stop set the shifter state to
                 * wait and stop
                 */
                if (stop) {
                    s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
                                    FSM_WAIT);
                } else {
                    /* Ok to move on to the next index */
                    next_sequencer_fsm(s);
                }
            }
            break;

        case SEQ_OP_BRANCH_IFNEQ_RDR:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_RDR", get_seq_index(s));
            /*
             * The memory mapping register RDR match value is compared against
             * the 16 rightmost bytes of the RDR (potentially with masking).
             * Since this comparison is performed against the contents of the
             * RDR then a receive must have previously occurred otherwise
             * there is no data to compare and the operation cannot be
             * completed and will stop the sequencer until RDR full is set to
             * 1.
             */
            if (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1) {
                bool rdr_matched = false;
                rdr_matched = does_rdr_match(s);
                if (rdr_matched) {
                    trace_pnv_spi_RDR_match("success");
                    /* A match occurred, increment the sequencer index. */
                    next_sequencer_fsm(s);
                } else {
                    trace_pnv_spi_RDR_match("failed");
                    /*
                     * Branch the sequencer to the index coded into the op
                     * code.
                     */
                    s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
                                    PNV_SPI_OPCODE_LO_NIBBLE(opcode));
                }
                /*
                 * Regardless of where the branch ended up we want the
                 * sequencer to continue shifting so we have to clear
                 * RDR_full.
                 */
                s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 0);
            } else {
                trace_pnv_spi_sequencer_stop_requested("RDR not "
                                "full for 0x60 opcode");
                stop = true;
                s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
            }
            break;

        case SEQ_OP_TRANSFER_TDR:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            qemu_log_mask(LOG_GUEST_ERROR, "Transfer TDR is not supported\n");
            next_sequencer_fsm(s);
            break;

        case SEQ_OP_BRANCH_IFNEQ_INC_1:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_1", get_seq_index(s));
            /*
             * The spec says the loop should execute count compare + 1 times.
             * However we learned from engineering that we really only loop
             * count_compare times, count compare = 0 makes this op code a
             * no-op
             */
            if (s->loop_counter_1 !=
                GETFIELD(SPI_CTR_CFG_CMP1, s->regs[SPI_CTR_CFG_REG])) {
                /*
                 * Next index is the lower nibble of the branch operation ID,
                 * mask off all but the first three bits so we don't try to
                 * access beyond the sequencer_operation_reg boundary.
                 */
                s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
                                PNV_SPI_OPCODE_LO_NIBBLE(opcode));
                s->loop_counter_1++;
            } else {
                /* Continue to next index if loop counter is reached */
                next_sequencer_fsm(s);
            }
            break;

        case SEQ_OP_BRANCH_IFNEQ_INC_2:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_2", get_seq_index(s));
            uint8_t condition2 = GETFIELD(SPI_CTR_CFG_CMP2,
                              s->regs[SPI_CTR_CFG_REG]);
            /*
             * The spec says the loop should execute count compare + 1 times.
             * However we learned from engineering that we really only loop
             * count_compare times, count compare = 0 makes this op code a
             * no-op
             */
            if (s->loop_counter_2 != condition2) {
                /*
                 * Next index is the lower nibble of the branch operation ID,
                 * mask off all but the first three bits so we don't try to
                 * access beyond the sequencer_operation_reg boundary.
                 */
                s->status = SETFIELD(SPI_STS_SEQ_INDEX,
                                s->status, PNV_SPI_OPCODE_LO_NIBBLE(opcode));
                s->loop_counter_2++;
            } else {
                /* Continue to next index if loop counter is reached */
                next_sequencer_fsm(s);
            }
            break;

        default:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            /* Ignore unsupported operations. */
            next_sequencer_fsm(s);
            break;
        } /* end of switch */
        /*
         * If we used all 8 opcodes without seeing a 00 - STOP in the sequence
         * we need to go ahead and end things as if there was a STOP at the
         * end.
         */
        if (get_seq_index(s) == NUM_SEQ_OPS) {
            /* All 8 opcodes completed, sequencer idling */
            s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
            s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0);
            s->loop_counter_1 = 0;
            s->loop_counter_2 = 0;
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
            break;
        }
        /* Break the loop if a stop was requested */
        if (stop) {
            break;
        }
    } /* end of while */
} /* end of operation_sequencer() */
1039 
1040 /*
1041  * The SPIC engine and its internal sequencer can be interrupted and reset by
1042  * a hardware signal, the sbe_spicst_hard_reset bits from Pervasive
1043  * Miscellaneous Register of sbe_register_bo device.
1044  * Reset immediately aborts any SPI transaction in progress and returns the
1045  * sequencer and state machines to idle state.
1046  * The configuration register values are not changed. The status register is
1047  * not reset. The engine registers are not reset.
 * The SPIC engine reset does not have any effect on the attached devices.
1049  * Reset handling of any attached devices is beyond the scope of the engine.
1050  */
static void do_reset(DeviceState *dev)
{
    PnvSpi *s = PNV_SPI(dev);
    DeviceState *responder;

    trace_pnv_spi_reset();

    /* (Re)wire our chip-select GPIO output to the attached responder, if any */
    responder = ssi_get_cs(s->ssi_bus, 0);
    if (responder) {
        qemu_irq cs_in = qdev_get_gpio_in_named(responder, SSI_GPIO_CS, 0);
        qdev_connect_gpio_out_named(DEVICE(s), "cs", 0, cs_in);
    }

    /* Clear every N1/N2 shift counter and both sequencer loop counters */
    s->N1_bits = 0;
    s->N1_bytes = 0;
    s->N1_tx = 0;
    s->N1_rx = 0;
    s->N2_bits = 0;
    s->N2_bytes = 0;
    s->N2_tx = 0;
    s->N2_rx = 0;
    s->loop_counter_1 = 0;
    s->loop_counter_2 = 0;
    /* Raise chip select: disconnected from responder */
    qemu_set_irq(s->cs_line[0], 1);
}
1079 
pnv_spi_xscom_read(void * opaque,hwaddr addr,unsigned size)1080 static uint64_t pnv_spi_xscom_read(void *opaque, hwaddr addr, unsigned size)
1081 {
1082     PnvSpi *s = PNV_SPI(opaque);
1083     uint32_t reg = addr >> 3;
1084     uint64_t val = ~0ull;
1085 
1086     switch (reg) {
1087     case ERROR_REG:
1088     case SPI_CTR_CFG_REG:
1089     case CONFIG_REG1:
1090     case SPI_CLK_CFG_REG:
1091     case SPI_MM_REG:
1092     case SPI_XMIT_DATA_REG:
1093         val = s->regs[reg];
1094         break;
1095     case SPI_RCV_DATA_REG:
1096         val = s->regs[reg];
1097         trace_pnv_spi_read_RDR(val);
1098         s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 0);
1099         if (GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_WAIT) {
1100             trace_pnv_spi_start_sequencer();
1101             operation_sequencer(s);
1102         }
1103         break;
1104     case SPI_SEQ_OP_REG:
1105         val = 0;
1106         for (int i = 0; i < PNV_SPI_REG_SIZE; i++) {
1107             val = (val << 8) | s->seq_op[i];
1108         }
1109         break;
1110     case SPI_STS_REG:
1111         val = s->status;
1112         break;
1113     default:
1114         qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom "
1115                  "read at 0x%" PRIx32 "\n", reg);
1116     }
1117 
1118     trace_pnv_spi_read(addr, val);
1119     return val;
1120 }
1121 
static void pnv_spi_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    PnvSpi *s = PNV_SPI(opaque);
    uint32_t reg = addr >> 3;   /* registers live on 8-byte boundaries */

    trace_pnv_spi_write(addr, val);

    switch (reg) {
    case ERROR_REG:
    case SPI_CTR_CFG_REG:
    case CONFIG_REG1:
    case SPI_MM_REG:
    case SPI_RCV_DATA_REG:
        /* Plain backing-store writes, no side effects */
        s->regs[reg] = val;
        break;
    case SPI_CLK_CFG_REG:
        /*
         * To reset the SPI controller write the sequence 0x5 0xA to
         * reset_control field
         */
        if ((GETFIELD(SPI_CLK_CFG_RST_CTRL, s->regs[SPI_CLK_CFG_REG]) == 0x5)
             && (GETFIELD(SPI_CLK_CFG_RST_CTRL, val) == 0xA)) {
            /* SPI controller reset sequence completed, resetting */
            s->regs[reg] = SPI_CLK_CFG_HARD_RST;
        } else {
            s->regs[reg] = val;
        }
        break;
    case SPI_XMIT_DATA_REG:
        /*
         * Writing to the transmit data register causes the transmit data
         * register full status bit in the status register to be set.  Writing
         * when the transmit data register full status bit is already set
         * causes a "Resource Not Available" condition.  This is not possible
         * in the model since writes to this register are not asynchronous to
         * the operation sequence like it would be in hardware.
         */
        s->regs[reg] = val;
        trace_pnv_spi_write_TDR(val);
        s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 1);
        s->status = SETFIELD(SPI_STS_TDR_UNDERRUN, s->status, 0);
        trace_pnv_spi_start_sequencer();
        operation_sequencer(s);
        break;
    case SPI_SEQ_OP_REG:
        /* Unpack the 64-bit value into the eight opcode bytes, MSB first */
        for (int i = 0; i < PNV_SPI_REG_SIZE; i++) {
            s->seq_op[i] = (val >> (56 - i * 8)) & 0xFF;
        }
        break;
    case SPI_STS_REG:
        /* Only the RDR/TDR overrun bits are writable; other fields are
         * ignore_write */
        s->status = SETFIELD(SPI_STS_RDR_OVERRUN, s->status,
                             GETFIELD(SPI_STS_RDR, val));
        s->status = SETFIELD(SPI_STS_TDR_OVERRUN, s->status,
                             GETFIELD(SPI_STS_TDR, val));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom "
                 "write at 0x%" PRIx32 "\n", reg);
    }
}
1185 
/*
 * XSCOM region ops for the SPI controller: accesses are fixed-size
 * 8-byte big-endian reads/writes only.
 */
static const MemoryRegionOps pnv_spi_xscom_ops = {
    .read = pnv_spi_xscom_read,
    .write = pnv_spi_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};
1195 
/*
 * User-configurable properties:
 *  - spic_num: SPI controller engine number, used to derive the bus name
 *    and the engine's xscom base address
 *  - transfer_len: default 4 -- presumably the SPI transfer width in
 *    bytes used by the shift logic; confirm against the shift helpers
 */
static Property pnv_spi_properties[] = {
    DEFINE_PROP_UINT32("spic_num", PnvSpi, spic_num, 0),
    DEFINE_PROP_UINT8("transfer_len", PnvSpi, transfer_len, 4),
    DEFINE_PROP_END_OF_LIST(),
};
1201 
pnv_spi_realize(DeviceState * dev,Error ** errp)1202 static void pnv_spi_realize(DeviceState *dev, Error **errp)
1203 {
1204     PnvSpi *s = PNV_SPI(dev);
1205     g_autofree char *name = g_strdup_printf(TYPE_PNV_SPI_BUS ".%d",
1206                     s->spic_num);
1207     s->ssi_bus = ssi_create_bus(dev, name);
1208     s->cs_line = g_new0(qemu_irq, 1);
1209     qdev_init_gpio_out_named(DEVICE(s), s->cs_line, "cs", 1);
1210 
1211     /* spi scoms */
1212     pnv_xscom_region_init(&s->xscom_spic_regs, OBJECT(s), &pnv_spi_xscom_ops,
1213                           s, "xscom-spi", PNV10_XSCOM_PIB_SPIC_SIZE);
1214 }
1215 
static int pnv_spi_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int offset)
{
    PnvSpi *s = PNV_SPI(dev);
    const char compat[] = "ibm,power10-spi";
    /* Engine base address inside the PIB SPIC xscom range */
    uint32_t spic_pcba = PNV10_XSCOM_PIB_SPIC_BASE +
        s->spic_num * PNV10_XSCOM_PIB_SPIC_SIZE;
    uint32_t reg[] = {
        cpu_to_be32(spic_pcba),
        cpu_to_be32(PNV10_XSCOM_PIB_SPIC_SIZE)
    };
    g_autofree char *node_name = g_strdup_printf("pnv_spi@%x", spic_pcba);
    int node = fdt_add_subnode(fdt, offset, node_name);

    _FDT(node);
    _FDT(fdt_setprop(fdt, node, "reg", reg, sizeof(reg)));
    _FDT(fdt_setprop(fdt, node, "compatible", compat, sizeof(compat)));
    _FDT(fdt_setprop_cell(fdt, node, "spic_num#", s->spic_num));
    return 0;
}
1238 
static void pnv_spi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xscomc = PNV_XSCOM_INTERFACE_CLASS(klass);

    /* Device class hooks */
    dc->desc = "PowerNV SPI";
    dc->realize = pnv_spi_realize;
    dc->reset = do_reset;
    device_class_set_props(dc, pnv_spi_properties);

    /* Device-tree node creation for the xscom interface */
    xscomc->dt_xscom = pnv_spi_dt_xscom;
}
1251 
/*
 * QOM type info: PnvSpi is a sysbus device that also implements the
 * PnvXScomInterface so it can be attached to the xscom address space.
 */
static const TypeInfo pnv_spi_info = {
    .name          = TYPE_PNV_SPI,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PnvSpi),
    .class_init    = pnv_spi_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};
1262 
/* Register the PnvSpi QOM type with the type system at module init time */
static void pnv_spi_register_types(void)
{
    type_register_static(&pnv_spi_info);
}

type_init(pnv_spi_register_types);
1269