/*
 * QEMU PowerPC SPI model
 *
 * Copyright (c) 2024, IBM Corporation.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ssi/pnv_spi.h"
#include "hw/ssi/pnv_spi_regs.h"
#include "hw/ssi/ssi.h"
#include <libfdt.h>
#include "hw/irq.h"
#include "trace.h"

#define PNV_SPI_OPCODE_LO_NIBBLE(x) (x & 0x0F)
#define PNV_SPI_MASKED_OPCODE(x) (x & 0xF0)

/*
 * Macro from include/hw/ppc/fdt.h
 * fdt.h cannot be included here as it contains ppc target specific
 * dependencies.
 */
#define _FDT(exp)                                                  \
    do {                                                           \
        int _ret = (exp);                                          \
        if (_ret < 0) {                                            \
            qemu_log_mask(LOG_GUEST_ERROR,                         \
                    "error creating device tree: %s: %s",          \
                    #exp, fdt_strerror(_ret));                     \
            exit(1);                                               \
        }                                                          \
    } while (0)

/* PnvXferBuffer */
typedef struct PnvXferBuffer {

    uint32_t len;
    uint8_t *data;

} PnvXferBuffer;

/* pnv_spi_xfer_buffer_methods */
static PnvXferBuffer *pnv_spi_xfer_buffer_new(void)
{
    PnvXferBuffer *payload = g_malloc0(sizeof(*payload));

    return payload;
}

static void pnv_spi_xfer_buffer_free(PnvXferBuffer *payload)
{
    g_free(payload->data);
    g_free(payload);
}

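/*
 * Return a pointer at which "length" bytes can be written starting at
 * "offset", growing the backing storage on demand.  The typical append
 * pattern used throughout this file is:
 *
 *     *(pnv_spi_xfer_buffer_write_ptr(buf, buf->len, 1)) = byte;
 */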
static uint8_t *pnv_spi_xfer_buffer_write_ptr(PnvXferBuffer *payload,
                uint32_t offset, uint32_t length)
{
    if (payload->len < (offset + length)) {
        payload->len = offset + length;
        payload->data = g_realloc(payload->data, payload->len);
    }
    return &payload->data[offset];
}

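/*
 * Compare the 16 rightmost bits of the RDR against the match value from
 * the memory mapping register, honoring the match mask.
 */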
static bool does_rdr_match(PnvSpi *s)
{
    /*
     * According to spec, the mask bits that are 0 are compared and the
     * bits that are 1 are ignored.
     */
    uint16_t rdr_match_mask = GETFIELD(SPI_MM_RDR_MATCH_MASK,
                                       s->regs[SPI_MM_REG]);
    uint16_t rdr_match_val = GETFIELD(SPI_MM_RDR_MATCH_VAL,
                                      s->regs[SPI_MM_REG]);

    if ((~rdr_match_mask & rdr_match_val) == ((~rdr_match_mask) &
            GETFIELD(PPC_BITMASK(48, 63), s->regs[SPI_RCV_DATA_REG]))) {
        return true;
    }
    return false;
}

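/* Extract the byte at "offset" (0 = most significant byte) from the TDR. */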
static uint8_t get_from_offset(PnvSpi *s, uint8_t offset)
{
    uint8_t byte;

    /*
     * Offset is an index between 0 and PNV_SPI_REG_SIZE - 1
     * Check the offset before using it.
     */
    if (offset < PNV_SPI_REG_SIZE) {
        byte = (s->regs[SPI_XMIT_DATA_REG] >> (56 - offset * 8)) & 0xFF;
    } else {
        /*
         * Log an error and return a 0xFF since we have to assign something
         * to byte before returning.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid offset = %d used to get byte "
                      "from TDR\n", offset);
        byte = 0xff;
    }
    return byte;
}

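/*
 * Shift nr_bytes of the payload into the RDR.  When ECC is present a frame
 * carries one ECC byte per PNV_SPI_REG_SIZE data bytes, so every
 * (PNV_SPI_REG_SIZE + 1)th byte shifted in is discarded rather than loaded.
 * The running shift_in_count is returned so that a frame split across N1
 * and N2 segments keeps skipping ECC bytes at the right positions.
 */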
static uint8_t read_from_frame(PnvSpi *s, uint8_t *read_buf, uint8_t nr_bytes,
                               uint8_t ecc_count, uint8_t shift_in_count)
{
    uint8_t byte;
    int count = 0;

    while (count < nr_bytes) {
        shift_in_count++;
        if ((ecc_count != 0) &&
            (shift_in_count == (PNV_SPI_REG_SIZE + ecc_count))) {
            shift_in_count = 0;
        } else {
            byte = read_buf[count];
            trace_pnv_spi_shift_rx(byte, count);
            s->regs[SPI_RCV_DATA_REG] = (s->regs[SPI_RCV_DATA_REG] << 8) | byte;
        }
        count++;
    } /* end of while */
    return shift_in_count;
}

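/*
 * Load the response payload into the RDR and update the RDR full/overrun
 * status bits.  The "bits" argument is currently unused here; the N1/N2
 * counters computed during the shift operations drive the processing.
 */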
static void spi_response(PnvSpi *s, int bits, PnvXferBuffer *rsp_payload)
{
    uint8_t ecc_count;
    uint8_t shift_in_count;

    /*
     * Processing here must handle:
     * - Which bytes in the payload we should move to the RDR
     * - Explicit mode counter configuration settings
     * - RDR full and RDR overrun status
     */

    /*
     * First check that the response payload is the exact same
     * number of bytes as the request payload was
     */
    if (rsp_payload->len != (s->N1_bytes + s->N2_bytes)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid response payload size in "
                      "bytes, expected %d, got %d\n",
                      (s->N1_bytes + s->N2_bytes), rsp_payload->len);
    } else {
        uint8_t ecc_control;
        trace_pnv_spi_rx_received(rsp_payload->len);
        trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                        s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
        /*
         * Adding an ECC count lets us know when we have found a payload byte
         * that was shifted in but cannot be loaded into RDR.  Bits 29-30 of
         * clock_config_reset_control register equal to either 0b00 or 0b10
         * indicate that we are taking in data with ECC and either applying
         * the ECC or discarding it.
         */
        ecc_count = 0;
        ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]);
        if (ecc_control == 0 || ecc_control == 2) {
            ecc_count = 1;
        }
        /*
         * Use the N1_rx and N2_rx counts to control shifting data from the
         * payload into the RDR.  Keep an overall count of the number of bytes
         * shifted into RDR so we can discard every 9th byte when ECC is
         * enabled.
         */
        shift_in_count = 0;
        /* Handle the N1 portion of the frame first */
        if (s->N1_rx != 0) {
            trace_pnv_spi_rx_read_N1frame();
            shift_in_count = read_from_frame(s, &rsp_payload->data[0],
                            s->N1_bytes, ecc_count, shift_in_count);
        }
        /* Handle the N2 portion of the frame */
        if (s->N2_rx != 0) {
            trace_pnv_spi_rx_read_N2frame();
            shift_in_count = read_from_frame(s,
                            &rsp_payload->data[s->N1_bytes], s->N2_bytes,
                            ecc_count, shift_in_count);
        }
        if ((s->N1_rx + s->N2_rx) > 0) {
            /*
             * Data was received so handle RDR status.
             * It is easier to handle RDR_full and RDR_overrun status here
             * since the RDR register's shift_byte_in method is called
             * multiple times in a row.  Controlling RDR status is done here
             * instead of in the RDR scoped methods for that reason.
             */
            if (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1) {
                /*
                 * Data was shifted into the RDR before having been read
                 * causing previous data to have been overrun.
                 */
                s->status = SETFIELD(SPI_STS_RDR_OVERRUN, s->status, 1);
            } else {
                /*
                 * Set status to indicate that the received data register is
                 * full.  This flag is only cleared once the RDR is unloaded.
                 */
                s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 1);
            }
        }
    } /* end of else */
} /* end of spi_response() */

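/*
 * Shift the assembled payload out on the SSI bus, transfer_len bytes at a
 * time, padding the final word with zeroes when the payload length is not a
 * multiple of transfer_len.  The bytes clocked back in are collected into a
 * response payload of equal length and handed to spi_response().
 */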
static void transfer(PnvSpi *s, PnvXferBuffer *payload)
{
    uint32_t tx;
    uint32_t rx;
    PnvXferBuffer *rsp_payload = NULL;

    rsp_payload = pnv_spi_xfer_buffer_new();
    if (!rsp_payload) {
        return;
    }
    for (int offset = 0; offset < payload->len; offset += s->transfer_len) {
        tx = 0;
        for (int i = 0; i < s->transfer_len; i++) {
            if ((offset + i) >= payload->len) {
                tx <<= 8;
            } else {
                tx = (tx << 8) | payload->data[offset + i];
            }
        }
        rx = ssi_transfer(s->ssi_bus, tx);
        for (int i = 0; i < s->transfer_len; i++) {
            if ((offset + i) >= payload->len) {
                break;
            }
            *(pnv_spi_xfer_buffer_write_ptr(rsp_payload, rsp_payload->len, 1)) =
                    (rx >> (8 * (s->transfer_len - 1) - i * 8)) & 0xFF;
        }
    }
    spi_response(s, s->N1_bits, rsp_payload);
    pnv_spi_xfer_buffer_free(rsp_payload);
}

static inline uint8_t get_seq_index(PnvSpi *s)
{
    return GETFIELD(SPI_STS_SEQ_INDEX, s->status);
}

static inline void next_sequencer_fsm(PnvSpi *s)
{
    uint8_t seq_index = get_seq_index(s);
    s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, (seq_index + 1));
    s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT);
}

/*
 * Calculate the N1 counters based on passed in opcode and
 * internal register values.
 * The method assumes that the opcode is a Shift_N1 opcode
 * and doesn't test it.
 * The counters returned are:
 * N1 bits: Number of bits in the payload data that are significant
 * to the responder.
 * N1_bytes: Total count of payload bytes for the N1 (portion of the) frame.
 * N1_tx: Total number of bytes taken from TDR for N1
 * N1_rx: Total number of bytes taken from the payload for N1
 */
static void calculate_N1(PnvSpi *s, uint8_t opcode)
{
    /*
     * Shift_N1 opcode form: 0x3M
     * Implicit mode:
     * If M != 0 the shift count is M bytes and M is the number of tx bytes.
     * Forced Implicit mode:
     * M is the shift count but tx and rx is determined by the count control
     * register fields.  Note that we only check for forced Implicit mode when
     * M != 0 since the mode doesn't make sense when M = 0.
     * Explicit mode:
     * If M == 0 then shift count is number of bits defined in the
     * Counter Configuration Register's shift_count_N1 field.
     */
    if (PNV_SPI_OPCODE_LO_NIBBLE(opcode) == 0) {
        /* Explicit mode */
        s->N1_bits = GETFIELD(SPI_CTR_CFG_N1, s->regs[SPI_CTR_CFG_REG]);
        s->N1_bytes = (s->N1_bits + 7) / 8;
        s->N1_tx = 0;
        s->N1_rx = 0;
        /* If tx count control for N1 is set, load the tx value */
        if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N1_tx = s->N1_bytes;
        }
        /* If rx count control for N1 is set, load the rx value */
        if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N1_rx = s->N1_bytes;
        }
    } else {
        /* Implicit mode/Forced Implicit mode, use M field from opcode */
        s->N1_bytes = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
        s->N1_bits = s->N1_bytes * 8;
        /*
         * Assume that we are going to transmit the count
         * (pure Implicit only)
         */
        s->N1_tx = s->N1_bytes;
        s->N1_rx = 0;
        /* Let Forced Implicit mode have an effect on the counts */
        if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B1, s->regs[SPI_CTR_CFG_REG]) == 1) {
            /*
             * If Forced Implicit mode and count control doesn't
             * indicate transmit then reset the tx count to 0
             */
            if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2,
                         s->regs[SPI_CTR_CFG_REG]) == 0) {
                s->N1_tx = 0;
            }
            /* If rx count control for N1 is set, load the rx value */
            if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3,
                         s->regs[SPI_CTR_CFG_REG]) == 1) {
                s->N1_rx = s->N1_bytes;
            }
        }
    }
    /*
     * Enforce an upper limit on the size of N1 that is equal to the known size
     * of the shift register, 64 bits or 72 bits if ECC is enabled.
     * If the size exceeds 72 bits it is a user error so log an error,
     * cap the size at a max of 64 bits or 72 bits and set the sequencer FSM
     * error bit.
     */
    uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL,
                                   s->regs[SPI_CLK_CFG_REG]);
    if (ecc_control == 0 || ecc_control == 2) {
        if (s->N1_bytes > (PNV_SPI_REG_SIZE + 1)) {
            qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size when "
                          "ECC enabled, bytes = 0x%x, bits = 0x%x\n",
                          s->N1_bytes, s->N1_bits);
            s->N1_bytes = PNV_SPI_REG_SIZE + 1;
            s->N1_bits = s->N1_bytes * 8;
        }
    } else if (s->N1_bytes > PNV_SPI_REG_SIZE) {
        qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size, "
                      "bytes = 0x%x, bits = 0x%x\n",
                      s->N1_bytes, s->N1_bits);
        s->N1_bytes = PNV_SPI_REG_SIZE;
        s->N1_bits = s->N1_bytes * 8;
    }
} /* end of calculate_N1 */

/*
 * Shift_N1 operation handler method
 */
static bool operation_shiftn1(PnvSpi *s, uint8_t opcode,
                              PnvXferBuffer **payload, bool send_n1_alone)
{
    uint8_t n1_count;
    bool stop = false;

    /*
     * If there isn't a current payload left over from a stopped sequence
     * create a new one.
     */
    if (*payload == NULL) {
        *payload = pnv_spi_xfer_buffer_new();
    }
    /*
     * Use a combination of N1 counters to build the N1 portion of the
     * transmit payload.
     * We only care about transmit at this time since the request payload
     * only represents data going out on the controller output line.
     * Leave mode specific considerations in the calculate function since
     * all we really care about are counters that tell us exactly how
     * many bytes are in the payload and how many of those bytes to
     * include from the TDR into the payload.
     */
    calculate_N1(s, opcode);
    trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                    s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
    /*
     * Zero out the N2 counters here in case there is no N2 operation following
     * the N1 operation in the sequencer.  This keeps leftover N2 information
     * from interfering with spi_response logic.
     */
    s->N2_bits = 0;
    s->N2_bytes = 0;
    s->N2_tx = 0;
    s->N2_rx = 0;
    /*
     * N1_bytes is the overall size of the N1 portion of the frame regardless
     * of whether N1 is used for tx, rx or both.  Loop over the size to build
     * a payload that is N1_bytes long.
     * N1_tx is the count of bytes to take from the TDR and "shift" into the
     * frame which means append those bytes to the payload for the N1 portion
     * of the frame.
     * If N1_tx is 0 or if the count exceeds the size of the TDR append 0xFF to
     * the frame until the overall N1 count is reached.
     */
    n1_count = 0;
    while (n1_count < s->N1_bytes) {
        /*
         * Assuming that if N1_tx is not equal to 0 then it is the same as
         * N1_bytes.
         */
        if ((s->N1_tx != 0) && (n1_count < PNV_SPI_REG_SIZE)) {

            if (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1) {
                /*
                 * Note that we are only appending to the payload IF the TDR
                 * is full otherwise we don't touch the payload because we are
                 * going to NOT send the payload and instead tell the sequencer
                 * that called us to stop and wait for a TDR write so we have
                 * data to load into the payload.
                 */
                uint8_t n1_byte = 0x00;
                n1_byte = get_from_offset(s, n1_count);
                trace_pnv_spi_tx_append("n1_byte", n1_byte, n1_count);
                *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1)) =
                        n1_byte;
            } else {
                /*
                 * We hit a shift_n1 opcode TX but the TDR is empty, tell the
                 * sequencer to stop and break this loop.
                 */
                trace_pnv_spi_sequencer_stop_requested("Shift N1 "
                                "set for transmit but TDR is empty");
                stop = true;
                break;
            }
        } else {
            /*
             * Cases here:
             * - we are receiving during the N1 frame segment and the RDR
             *   is full so we need to stop until the RDR is read
             * - we are transmitting and we don't care about RDR status
             *   since we won't be loading RDR during the frame segment.
             * - we are receiving and the RDR is empty so we allow the
             *   operation to proceed.
             */
            if ((s->N1_rx != 0) && (GETFIELD(SPI_STS_RDR_FULL,
                                             s->status) == 1)) {
                trace_pnv_spi_sequencer_stop_requested("shift N1 "
                                "set for receive but RDR is full");
                stop = true;
                break;
            } else {
                trace_pnv_spi_tx_append_FF("n1_byte");
                *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
                        = 0xff;
            }
        }
        n1_count++;
    } /* end of while */
    /*
     * If we are not stopping due to an empty TDR and we are doing an N1 TX
     * and the TDR is full we need to clear the TDR_full status.
     * Do this here instead of up in the loop above so we don't log the message
     * in every loop iteration.
     * Ignore the send_n1_alone flag, all that does is defer the TX until the N2
     * operation, which was found immediately after the current opcode.  The TDR
     * was unloaded and will be shifted so we have to clear the TDR_full status.
     */
    if (!stop && (s->N1_tx != 0) &&
        (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
        s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
    }
    /*
     * There are other reasons why the shifter would stop, such as a TDR empty
     * or RDR full condition with N1 set to receive.  If we haven't stopped due
     * to either one of those conditions then check if the send_n1_alone flag is
     * equal to False, indicating the next opcode is an N2 operation, AND if
     * the N2 counter reload switch (bit 0 of the N2 count control field) is
     * set.  This condition requires a pacing write to "kick" off the N2
     * shift which includes the N1 shift as well when send_n1_alone is False.
     */
    if (!stop && !send_n1_alone &&
        (GETFIELD(SPI_CTR_CFG_N2_CTRL_B0, s->regs[SPI_CTR_CFG_REG]) == 1)) {
        trace_pnv_spi_sequencer_stop_requested("N2 counter reload "
                        "active, stop N1 shift, TDR_underrun set to 1");
        stop = true;
        s->status = SETFIELD(SPI_STS_TDR_UNDERRUN, s->status, 1);
    }
    /*
     * If send_n1_alone is set AND we have a full TDR then this is the first and
     * last payload to send and we don't have an N2 frame segment to add to the
     * payload.
     */
    if (send_n1_alone && !stop) {
        /* We have a TX and a full TDR or an RX and an empty RDR */
        trace_pnv_spi_tx_request("Shifting N1 frame", (*payload)->len);
        transfer(s, *payload);
        /* The N1 frame shift is complete, so reset the N2 counters */
        s->N2_bits = 0;
        s->N2_bytes = 0;
        s->N2_tx = 0;
        s->N2_rx = 0;
        pnv_spi_xfer_buffer_free(*payload);
        *payload = NULL;
    }
    return stop;
} /* end of operation_shiftn1() */

/*
 * Calculate the N2 counters based on passed in opcode and
 * internal register values.
 * The method assumes that the opcode is a Shift_N2 opcode
 * and doesn't test it.
 * The counters returned are:
 * N2 bits: Number of bits in the payload data that are significant
 * to the responder.
 * N2_bytes: Total count of payload bytes for the N2 frame.
 * N2_tx: Total number of bytes taken from TDR for N2
 * N2_rx: Total number of bytes taken from the payload for N2
 */
static void calculate_N2(PnvSpi *s, uint8_t opcode)
{
    /*
     * Shift_N2 opcode form: 0x4M
     * Implicit mode:
     * If M != 0 the shift count is M bytes and M is the number of rx bytes.
     * Forced Implicit mode:
     * M is the shift count but tx and rx is determined by the count control
     * register fields.  Note that we only check for Forced Implicit mode when
     * M != 0 since the mode doesn't make sense when M = 0.
     * Explicit mode:
     * If M == 0 then shift count is number of bits defined in the
     * Counter Configuration Register's shift_count_N2 field.
     */
    if (PNV_SPI_OPCODE_LO_NIBBLE(opcode) == 0) {
        /* Explicit mode */
        s->N2_bits = GETFIELD(SPI_CTR_CFG_N2, s->regs[SPI_CTR_CFG_REG]);
        s->N2_bytes = (s->N2_bits + 7) / 8;
        s->N2_tx = 0;
        s->N2_rx = 0;
        /* If tx count control for N2 is set, load the tx value */
        if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N2_tx = s->N2_bytes;
        }
        /* If rx count control for N2 is set, load the rx value */
        if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N2_rx = s->N2_bytes;
        }
    } else {
        /* Implicit mode/Forced Implicit mode, use M field from opcode */
        s->N2_bytes = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
        s->N2_bits = s->N2_bytes * 8;
        /* Assume that we are going to receive the count */
        s->N2_rx = s->N2_bytes;
        s->N2_tx = 0;
        /* Let Forced Implicit mode have an effect on the counts */
        if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B1, s->regs[SPI_CTR_CFG_REG]) == 1) {
            /*
             * If Forced Implicit mode and count control doesn't
             * indicate a receive then reset the rx count to 0
             */
            if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3,
                         s->regs[SPI_CTR_CFG_REG]) == 0) {
                s->N2_rx = 0;
            }
            /* If tx count control for N2 is set, load the tx value */
            if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2,
                         s->regs[SPI_CTR_CFG_REG]) == 1) {
                s->N2_tx = s->N2_bytes;
            }
        }
    }
    /*
     * Enforce an upper limit on the size of N2 that is equal to the
     * known size of the shift register, 64 bits or 72 bits if ECC
     * is enabled.
     * If the size exceeds 72 bits it is a user error so log an error,
     * cap the size at a max of 64 bits or 72 bits and set the sequencer FSM
     * error bit.
     */
    uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL,
                                   s->regs[SPI_CLK_CFG_REG]);
    if (ecc_control == 0 || ecc_control == 2) {
        if (s->N2_bytes > (PNV_SPI_REG_SIZE + 1)) {
            /* Unsupported N2 shift size when ECC enabled */
            s->N2_bytes = PNV_SPI_REG_SIZE + 1;
            s->N2_bits = s->N2_bytes * 8;
        }
    } else if (s->N2_bytes > PNV_SPI_REG_SIZE) {
        /* Unsupported N2 shift size */
        s->N2_bytes = PNV_SPI_REG_SIZE;
        s->N2_bits = s->N2_bytes * 8;
    }
} /* end of calculate_N2 */

/*
 * Shift_N2 operation handler method
 */

static bool operation_shiftn2(PnvSpi *s, uint8_t opcode,
                              PnvXferBuffer **payload)
{
    uint8_t n2_count;
    bool stop = false;

    /*
     * If there isn't a current payload left over from a stopped sequence
     * create a new one.
     */
    if (*payload == NULL) {
        *payload = pnv_spi_xfer_buffer_new();
    }
    /*
     * Use a combination of N2 counters to build the N2 portion of the
     * transmit payload.
     */
    calculate_N2(s, opcode);
    trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                    s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
    /*
     * The only difference between this code and the code for shift N1 is
     * that this code has to account for the possible presence of N1 transmit
     * bytes already taken from the TDR.
     * If there are bytes to be transmitted for the N2 portion of the frame
     * and there are still bytes in TDR that have not been copied into the
     * TX data of the payload, this code will handle transmitting those
     * remaining bytes.
     * If for some reason the transmit count(s) add up to more than the size
     * of the TDR we will just append 0xFF to the transmit payload data until
     * the payload is N1 + N2 bytes long.
     */
    n2_count = 0;
    while (n2_count < s->N2_bytes) {
        /*
         * If the RDR is full and we need to RX just bail out, letting the
         * code continue will end up building the payload twice in the same
         * buffer since RDR full causes a sequence stop and restart.
         */
        if ((s->N2_rx != 0) &&
            (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) {
            trace_pnv_spi_sequencer_stop_requested("shift N2 set "
                            "for receive but RDR is full");
            stop = true;
            break;
        }
        if ((s->N2_tx != 0) && ((s->N1_tx + n2_count) <
                                PNV_SPI_REG_SIZE)) {
            /* Always append data for the N2 segment if it is set for TX */
            uint8_t n2_byte = 0x00;
            n2_byte = get_from_offset(s, (s->N1_tx + n2_count));
            trace_pnv_spi_tx_append("n2_byte", n2_byte, (s->N1_tx + n2_count));
            *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
                    = n2_byte;
        } else {
            /*
             * Regardless of whether or not N2 is set for TX or RX, we need
             * the number of bytes in the payload to match the overall length
             * of the operation.
             */
            trace_pnv_spi_tx_append_FF("n2_byte");
            *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
                    = 0xff;
        }
        n2_count++;
    } /* end of while */
    if (!stop) {
        /* We have a TX and a full TDR or an RX and an empty RDR */
        trace_pnv_spi_tx_request("Shifting N2 frame", (*payload)->len);
        transfer(s, *payload);
        /*
         * If we are doing an N2 TX and the TDR is full we need to clear the
         * TDR_full status.  Do this here instead of up in the loop above so we
         * don't log the message in every loop iteration.
         */
        if ((s->N2_tx != 0) &&
            (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
            s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
        }
        /*
         * The N2 frame shift is complete so reset the N2 counters.
         * Reset the N1 counters also in case the frame was a combination of
         * N1 and N2 segments.
         */
        s->N2_bits = 0;
        s->N2_bytes = 0;
        s->N2_tx = 0;
        s->N2_rx = 0;
        s->N1_bits = 0;
        s->N1_bytes = 0;
        s->N1_tx = 0;
        s->N1_rx = 0;
        pnv_spi_xfer_buffer_free(*payload);
        *payload = NULL;
    }
    return stop;
} /* end of operation_shiftn2() */

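/*
 * Walk the eight entries of the sequencer operation register, decoding and
 * executing each opcode until a STOP opcode, an error, or a wait condition
 * (TDR empty or RDR full) suspends the sequence.
 */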
static void operation_sequencer(PnvSpi *s)
{
    /*
     * Loop through each sequencer operation ID and perform the requested
     * operations.
     * Flag for indicating if we should send the N1 frame or wait to combine
     * it with a following N2 frame.
     */
    bool send_n1_alone = true;
    bool stop = false; /* Flag to stop the sequencer */
    uint8_t opcode = 0;
    uint8_t masked_opcode = 0;

    /*
     * PnvXferBuffer for containing the payload of the SPI frame.
     * This is a static because there are cases where a sequence has to stop
     * and wait for the target application to unload the RDR.  If this occurs
     * during a sequence where N1 is not sent alone and instead combined with
     * N2 since the N1 tx length + the N2 tx length is less than the size of
     * the TDR.
     */
    static PnvXferBuffer *payload;

    if (payload == NULL) {
        payload = pnv_spi_xfer_buffer_new();
    }
    /*
     * Clear the sequencer FSM error bit - general_SPI_status[3]
     * before starting a sequence.
     */
    s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 0);
    /*
     * If the FSM is idle set the sequencer index to 0
     * (new/restarted sequence)
     */
    if (GETFIELD(SPI_STS_SEQ_FSM, s->status) == SEQ_STATE_IDLE) {
        s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0);
    }
    /*
     * There are only 8 possible operation IDs to iterate through though
     * some operations may cause more than one frame to be sequenced.
     */
    while (get_seq_index(s) < NUM_SEQ_OPS) {
        opcode = s->seq_op[get_seq_index(s)];
        /* Set sequencer state to decode */
        s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_DECODE);
        /*
         * Only the upper nibble of the operation ID is needed to know what
         * kind of operation is requested.
         */
        masked_opcode = PNV_SPI_MASKED_OPCODE(opcode);
        switch (masked_opcode) {
        /*
         * Increment the operation index in each case instead of just
         * once at the end in case an operation like the branch
         * operation needs to change the index.
         */
        case SEQ_OP_STOP:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            /* A stop operation in any position stops the sequencer */
            trace_pnv_spi_sequencer_op("STOP", get_seq_index(s));

            stop = true;
            s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
            s->loop_counter_1 = 0;
            s->loop_counter_2 = 0;
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
            break;

        case SEQ_OP_SELECT_SLAVE:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            trace_pnv_spi_sequencer_op("SELECT_SLAVE", get_seq_index(s));
            /*
             * This device currently only supports a single responder
             * connection at position 0.  De-selecting a responder is fine
             * and expected at the end of a sequence but selecting any
             * responder other than 0 should cause an error.
             */
            s->responder_select = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
            if (s->responder_select == 0) {
                trace_pnv_spi_shifter_done();
                qemu_set_irq(s->cs_line[0], 1);
                s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
                                     (get_seq_index(s) + 1));
                s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_DONE);
            } else if (s->responder_select != 1) {
                qemu_log_mask(LOG_GUEST_ERROR, "Slave selection other than 1 "
                              "not supported, select = 0x%x\n",
                              s->responder_select);
                trace_pnv_spi_sequencer_stop_requested("invalid "
                                "responder select");
                s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
                stop = true;
            } else {
                /*
                 * Only allow an FSM_START state when a responder is
                 * selected
                 */
                s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_START);
                trace_pnv_spi_shifter_stating();
                qemu_set_irq(s->cs_line[0], 0);
                /*
                 * A Shift_N2 operation is only valid after a Shift_N1
                 * according to the spec.  The spec doesn't say if that means
                 * immediately after or just after at any point.  We will track
                 * the occurrence of a Shift_N1 to enforce this requirement in
                 * the most generic way possible by assuming that the rule
                 * applies once a valid responder select has occurred.
                 */
                s->shift_n1_done = false;
                next_sequencer_fsm(s);
            }
            break;

        case SEQ_OP_SHIFT_N1:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            trace_pnv_spi_sequencer_op("SHIFT_N1", get_seq_index(s));
            /*
             * Only allow a shift_n1 when the state is not IDLE or DONE.
             * In either of those two cases the sequencer is not in a proper
             * state to perform shift operations because the sequencer has:
             * - processed a responder deselect (DONE)
             * - processed a stop opcode (IDLE)
             * - encountered an error (IDLE)
             */
            if ((GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_IDLE) ||
                (GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_DONE)) {
                qemu_log_mask(LOG_GUEST_ERROR, "Shift_N1 not allowed in "
                              "shifter state = 0x%llx", GETFIELD(
                        SPI_STS_SHIFTER_FSM, s->status));
                /*
                 * Set sequencer FSM error bit 3 (general_SPI_status[3])
                 * in status reg.
                 */
                s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
                trace_pnv_spi_sequencer_stop_requested("invalid shifter state");
                stop = true;
            } else {
                /*
                 * Look for the special case where there is a shift_n1 set for
                 * transmit and it is followed by a shift_n2 set for transmit
                 * AND the combined transmit length of the two operations is
                 * less than or equal to the size of the TDR register.  In this
                 * case we want to use both this current shift_n1 opcode and the
                 * following shift_n2 opcode to assemble the frame for
                 * transmission to the responder without requiring a refill of
                 * the TDR between the two operations.
                 */
                if (PNV_SPI_MASKED_OPCODE(s->seq_op[get_seq_index(s) + 1])
                    == SEQ_OP_SHIFT_N2) {
                    send_n1_alone = false;
                }
                s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
                                     FSM_SHIFT_N1);
                stop = operation_shiftn1(s, opcode, &payload, send_n1_alone);
                if (stop) {
                    /*
                     * The operation code says to stop, this can occur if:
                     * (1) RDR is full and the N1 shift is set for receive
                     * (2) TDR was empty at the time of the N1 shift so we need
                     * to wait for data.
                     * (3) Neither 1 nor 2 are occurring and we aren't sending
                     * N1 alone and N2 counter reload is set (bit 0 of the N2
                     * counter reload field).  In this case TDR_underrun will
                     * be set and the payload has been loaded so it is
                     * ok to advance the sequencer.
                     */
                    if (GETFIELD(SPI_STS_TDR_UNDERRUN, s->status)) {
                        s->shift_n1_done = true;
                        s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
                                             FSM_SHIFT_N2);
                        s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
                                             (get_seq_index(s) + 1));
                    } else {
                        /*
                         * This is case (1) or (2) so the sequencer needs to
                         * wait and NOT go to the next sequence yet.
                         */
                        s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
                                             FSM_WAIT);
                    }
                } else {
                    /* Ok to move on to the next index */
                    s->shift_n1_done = true;
                    next_sequencer_fsm(s);
                }
            }
            break;

        case SEQ_OP_SHIFT_N2:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            trace_pnv_spi_sequencer_op("SHIFT_N2", get_seq_index(s));
            if (!s->shift_n1_done) {
                qemu_log_mask(LOG_GUEST_ERROR, "Shift_N2 is not allowed if a "
                              "Shift_N1 is not done, shifter state = 0x%llx",
                              GETFIELD(SPI_STS_SHIFTER_FSM, s->status));
                /*
                 * In case the sequencer actually stops if an N2 shift is
                 * requested before any N1 shift is done.  Set sequencer FSM
                 * error bit 3 (general_SPI_status[3]) in status reg.
                 */
                s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
                trace_pnv_spi_sequencer_stop_requested("shift_n2 "
                                "w/no shift_n1 done");
                stop = true;
            } else {
                /* Ok to do a Shift_N2 */
                s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
                                     FSM_SHIFT_N2);
                stop = operation_shiftn2(s, opcode, &payload);
                /*
                 * If the operation code says to stop set the shifter state to
                 * wait and stop
                 */
                if (stop) {
                    s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
                                         FSM_WAIT);
                } else {
                    /* Ok to move on to the next index */
                    next_sequencer_fsm(s);
                }
            }
            break;

        case SEQ_OP_BRANCH_IFNEQ_RDR:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_RDR", get_seq_index(s));
            /*
             * The memory mapping register RDR match value is compared against
             * the 16 rightmost bits of the RDR (potentially with masking).
             * Since this comparison is performed against the contents of the
             * RDR then a receive must have previously occurred otherwise
             * there is no data to compare and the operation cannot be
             * completed and will stop the sequencer until RDR full is set to
             * 1.
             */
            if (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1) {
                bool rdr_matched = false;
                rdr_matched = does_rdr_match(s);
                if (rdr_matched) {
                    trace_pnv_spi_RDR_match("success");
                    /* A match occurred, increment the sequencer index. */
                    next_sequencer_fsm(s);
                } else {
                    trace_pnv_spi_RDR_match("failed");
                    /*
                     * Branch the sequencer to the index coded into the op
                     * code.
                     */
                    s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
                                         PNV_SPI_OPCODE_LO_NIBBLE(opcode));
                }
                /*
                 * Regardless of where the branch ended up we want the
                 * sequencer to continue shifting so we have to clear
                 * RDR_full.
                 */
                s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 0);
            } else {
                trace_pnv_spi_sequencer_stop_requested("RDR not "
                                "full for 0x6x opcode");
                stop = true;
                s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
            }
            break;

        case SEQ_OP_TRANSFER_TDR:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            qemu_log_mask(LOG_GUEST_ERROR, "Transfer TDR is not supported\n");
            next_sequencer_fsm(s);
            break;

        case SEQ_OP_BRANCH_IFNEQ_INC_1:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_1", get_seq_index(s));
            /*
             * The spec says the loop should execute count compare + 1 times.
             * However we learned from engineering that we really only loop
             * count_compare times, count compare = 0 makes this op code a
             * no-op
             */
            if (s->loop_counter_1 !=
                GETFIELD(SPI_CTR_CFG_CMP1, s->regs[SPI_CTR_CFG_REG])) {
                /*
                 * Next index is the lower nibble of the branch operation ID,
                 * mask off all but the first three bits so we don't try to
                 * access beyond the sequencer_operation_reg boundary.
                 */
                s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
                                     PNV_SPI_OPCODE_LO_NIBBLE(opcode));
                s->loop_counter_1++;
            } else {
                /* Continue to next index if loop counter is reached */
                next_sequencer_fsm(s);
            }
            break;

        case SEQ_OP_BRANCH_IFNEQ_INC_2:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_2", get_seq_index(s));
            uint8_t condition2 = GETFIELD(SPI_CTR_CFG_CMP2,
                                          s->regs[SPI_CTR_CFG_REG]);
            /*
             * The spec says the loop should execute count compare + 1 times.
             * However we learned from engineering that we really only loop
             * count_compare times, count compare = 0 makes this op code a
             * no-op
             */
            if (s->loop_counter_2 != condition2) {
                /*
                 * Next index is the lower nibble of the branch operation ID,
                 * mask off all but the first three bits so we don't try to
                 * access beyond the sequencer_operation_reg boundary.
                 */
                s->status = SETFIELD(SPI_STS_SEQ_INDEX,
                                     s->status, PNV_SPI_OPCODE_LO_NIBBLE(opcode));
                s->loop_counter_2++;
            } else {
                /* Continue to next index if loop counter is reached */
                next_sequencer_fsm(s);
            }
            break;

        default:
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
            /* Ignore unsupported operations. */
            next_sequencer_fsm(s);
            break;
        } /* end of switch */
        /*
         * If we used all 8 opcodes without seeing a 00 - STOP in the sequence
         * we need to go ahead and end things as if there was a STOP at the
         * end.
         */
        if (get_seq_index(s) == NUM_SEQ_OPS) {
            /* All 8 opcodes completed, sequencer idling */
            s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
            s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0);
            s->loop_counter_1 = 0;
            s->loop_counter_2 = 0;
            s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
            break;
        }
        /* Break the loop if a stop was requested */
        if (stop) {
            break;
        }
    } /* end of while */
    return;
} /* end of operation_sequencer() */

/*
 * The SPIC engine and its internal sequencer can be interrupted and reset by
 * a hardware signal, the sbe_spicst_hard_reset bits from Pervasive
 * Miscellaneous Register of sbe_register_bo device.
 * Reset immediately aborts any SPI transaction in progress and returns the
 * sequencer and state machines to idle state.
 * The configuration register values are not changed.  The status register is
 * not reset.  The engine registers are not reset.
 * The SPIC engine reset does not have any effect on the attached devices.
 * Reset handling of any attached devices is beyond the scope of the engine.
 */
static void do_reset(DeviceState *dev)
{
    PnvSpi *s = PNV_SPI(dev);
    DeviceState *ssi_dev;

    trace_pnv_spi_reset();

    /* Connect cs irq */
    ssi_dev = ssi_get_cs(s->ssi_bus, 0);
    if (ssi_dev) {
        qemu_irq cs_line = qdev_get_gpio_in_named(ssi_dev, SSI_GPIO_CS, 0);
        qdev_connect_gpio_out_named(DEVICE(s), "cs", 0, cs_line);
    }

    /* Reset all N1 and N2 counters, and other constants */
    s->N2_bits = 0;
    s->N2_bytes = 0;
    s->N2_tx = 0;
    s->N2_rx = 0;
    s->N1_bits = 0;
    s->N1_bytes = 0;
    s->N1_tx = 0;
    s->N1_rx = 0;
    s->loop_counter_1 = 0;
    s->loop_counter_2 = 0;
    /* Disconnected from responder */
    qemu_set_irq(s->cs_line[0], 1);
}

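/*
 * XSCOM reads.  Note that reading the receive data register has side
 * effects: it clears RDR_full and, if the shifter was waiting on the RDR,
 * restarts the operation sequencer.
 */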
static uint64_t pnv_spi_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvSpi *s = PNV_SPI(opaque);
    uint32_t reg = addr >> 3;
    uint64_t val = ~0ull;

    switch (reg) {
    case ERROR_REG:
    case SPI_CTR_CFG_REG:
    case CONFIG_REG1:
    case SPI_CLK_CFG_REG:
    case SPI_MM_REG:
    case SPI_XMIT_DATA_REG:
        val = s->regs[reg];
        break;
    case SPI_RCV_DATA_REG:
        val = s->regs[reg];
        trace_pnv_spi_read_RDR(val);
        s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 0);
        if (GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_WAIT) {
            trace_pnv_spi_start_sequencer();
            operation_sequencer(s);
        }
        break;
    case SPI_SEQ_OP_REG:
        val = 0;
        for (int i = 0; i < PNV_SPI_REG_SIZE; i++) {
            val = (val << 8) | s->seq_op[i];
        }
        break;
    case SPI_STS_REG:
        val = s->status;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom "
                      "read at 0x%" PRIx32 "\n", reg);
    }

    trace_pnv_spi_read(addr, val);
    return val;
}

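/*
 * XSCOM writes.  A write to the transmit data register marks the TDR full
 * and kicks off the operation sequencer; writing 0x5 followed by 0xA to the
 * clock config reset_control field resets the controller.
 */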
static void pnv_spi_xscom_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
    PnvSpi *s = PNV_SPI(opaque);
    uint32_t reg = addr >> 3;

    trace_pnv_spi_write(addr, val);

    switch (reg) {
    case ERROR_REG:
    case SPI_CTR_CFG_REG:
    case CONFIG_REG1:
    case SPI_MM_REG:
    case SPI_RCV_DATA_REG:
        s->regs[reg] = val;
        break;
    case SPI_CLK_CFG_REG:
        /*
         * To reset the SPI controller write the sequence 0x5 0xA to
         * reset_control field
         */
        if ((GETFIELD(SPI_CLK_CFG_RST_CTRL, s->regs[SPI_CLK_CFG_REG]) == 0x5)
            && (GETFIELD(SPI_CLK_CFG_RST_CTRL, val) == 0xA)) {
            /* SPI controller reset sequence completed, resetting */
            s->regs[reg] = SPI_CLK_CFG_HARD_RST;
        } else {
            s->regs[reg] = val;
        }
        break;
    case SPI_XMIT_DATA_REG:
        /*
         * Writing to the transmit data register causes the transmit data
         * register full status bit in the status register to be set.  Writing
         * when the transmit data register full status bit is already set
         * causes a "Resource Not Available" condition.  This is not possible
         * in the model since writes to this register are not asynchronous to
         * the operation sequence as they would be in hardware.
         */
        s->regs[reg] = val;
        trace_pnv_spi_write_TDR(val);
        s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 1);
        s->status = SETFIELD(SPI_STS_TDR_UNDERRUN, s->status, 0);
        trace_pnv_spi_start_sequencer();
        operation_sequencer(s);
        break;
    case SPI_SEQ_OP_REG:
        for (int i = 0; i < PNV_SPI_REG_SIZE; i++) {
            s->seq_op[i] = (val >> (56 - i * 8)) & 0xFF;
        }
        break;
    case SPI_STS_REG:
        /* other fields are ignore_write */
        s->status = SETFIELD(SPI_STS_RDR_OVERRUN, s->status,
                             GETFIELD(SPI_STS_RDR, val));
        s->status = SETFIELD(SPI_STS_TDR_OVERRUN, s->status,
                             GETFIELD(SPI_STS_TDR, val));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom "
                      "write at 0x%" PRIx32 "\n", reg);
    }
    return;
}

static const MemoryRegionOps pnv_spi_xscom_ops = {
    .read = pnv_spi_xscom_read,
    .write = pnv_spi_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};

static Property pnv_spi_properties[] = {
    DEFINE_PROP_UINT32("spic_num", PnvSpi, spic_num, 0),
    DEFINE_PROP_UINT8("transfer_len", PnvSpi, transfer_len, 4),
    DEFINE_PROP_END_OF_LIST(),
};

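/* Create the SSI bus and chip-select line, and set up the xscom region. */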
static void pnv_spi_realize(DeviceState *dev, Error **errp)
{
    PnvSpi *s = PNV_SPI(dev);
    g_autofree char *name = g_strdup_printf(TYPE_PNV_SPI_BUS ".%d",
                    s->spic_num);
    s->ssi_bus = ssi_create_bus(dev, name);
    s->cs_line = g_new0(qemu_irq, 1);
    qdev_init_gpio_out_named(DEVICE(s), s->cs_line, "cs", 1);

    /* spi scoms */
    pnv_xscom_region_init(&s->xscom_spic_regs, OBJECT(s), &pnv_spi_xscom_ops,
                          s, "xscom-spi", PNV10_XSCOM_PIB_SPIC_SIZE);
}

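/* Populate the device tree node for this SPI controller under xscom. */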
static int pnv_spi_dt_xscom(PnvXScomInterface *dev, void *fdt,
                            int offset)
{
    PnvSpi *s = PNV_SPI(dev);
    g_autofree char *name;
    int s_offset;
    const char compat[] = "ibm,power10-spi";
    uint32_t spic_pcba = PNV10_XSCOM_PIB_SPIC_BASE +
        s->spic_num * PNV10_XSCOM_PIB_SPIC_SIZE;
    uint32_t reg[] = {
        cpu_to_be32(spic_pcba),
        cpu_to_be32(PNV10_XSCOM_PIB_SPIC_SIZE)
    };
    name = g_strdup_printf("pnv_spi@%x", spic_pcba);
    s_offset = fdt_add_subnode(fdt, offset, name);
    _FDT(s_offset);

    _FDT(fdt_setprop(fdt, s_offset, "reg", reg, sizeof(reg)));
    _FDT(fdt_setprop(fdt, s_offset, "compatible", compat, sizeof(compat)));
    _FDT((fdt_setprop_cell(fdt, s_offset, "spic_num#", s->spic_num)));
    return 0;
}

static void pnv_spi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xscomc = PNV_XSCOM_INTERFACE_CLASS(klass);

    xscomc->dt_xscom = pnv_spi_dt_xscom;

    dc->desc = "PowerNV SPI";
    dc->realize = pnv_spi_realize;
    device_class_set_legacy_reset(dc, do_reset);
    device_class_set_props(dc, pnv_spi_properties);
}

static const TypeInfo pnv_spi_info = {
    .name = TYPE_PNV_SPI,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PnvSpi),
    .class_init = pnv_spi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_spi_register_types(void)
{
    type_register_static(&pnv_spi_info);
}

type_init(pnv_spi_register_types);