xref: /openbmc/qemu/hw/scsi/esp.c (revision a56ac09f5c37f57059c2a2c5ae6aeff7f7241a84)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  * Copyright (c) 2023 Mark Cave-Ayland
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a copy
9  * of this software and associated documentation files (the "Software"), to deal
10  * in the Software without restriction, including without limitation the rights
11  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12  * copies of the Software, and to permit persons to whom the Software is
13  * furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included in
16  * all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24  * THE SOFTWARE.
25  */
26 
27 #include "qemu/osdep.h"
28 #include "hw/sysbus.h"
29 #include "migration/vmstate.h"
30 #include "hw/irq.h"
31 #include "hw/scsi/esp.h"
32 #include "trace.h"
33 #include "qemu/log.h"
34 #include "qemu/module.h"
35 
36 /*
37  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
38  * also produced as NCR89C100. See
39  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
40  * and
41  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
42  *
43  * On Macintosh Quadra it is a NCR53C96.
44  */
45 
/* Assert the ESP interrupt line, latching STAT_INT; edge-triggered (no-op if already raised). */
static void esp_raise_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        return;
    }
    s->rregs[ESP_RSTAT] |= STAT_INT;
    qemu_irq_raise(s->irq);
    trace_esp_raise_irq();
}
54 
/* Deassert the ESP interrupt line and clear STAT_INT; no-op if already lowered. */
static void esp_lower_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        return;
    }
    s->rregs[ESP_RSTAT] &= ~STAT_INT;
    qemu_irq_lower(s->irq);
    trace_esp_lower_irq();
}
63 
/* Raise the DMA request (DRQ) line, tracking the edge in drq_state. */
static void esp_raise_drq(ESPState *s)
{
    if (s->drq_state) {
        return;
    }
    qemu_irq_raise(s->drq_irq);
    trace_esp_raise_drq();
    s->drq_state = true;
}
72 
/* Lower the DMA request (DRQ) line, tracking the edge in drq_state. */
static void esp_lower_drq(ESPState *s)
{
    if (!s->drq_state) {
        return;
    }
    qemu_irq_lower(s->drq_irq);
    trace_esp_lower_drq();
    s->drq_state = false;
}
81 
/* SCSI bus phase names, indexed by the low 3 phase bits of RSTAT (STAT_*) */
static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};
86 
/* Set the current SCSI bus phase in the low 3 bits of RSTAT. */
static void esp_set_phase(ESPState *s, uint8_t phase)
{
    /* Replace only the 3 phase bits, preserving the rest of RSTAT */
    s->rregs[ESP_RSTAT] = (s->rregs[ESP_RSTAT] & ~7) | phase;

    trace_esp_set_phase(esp_phase_names[phase]);
}
94 
/* Return the current SCSI bus phase (low 3 bits of RSTAT). */
static uint8_t esp_get_phase(ESPState *s)
{
    uint8_t phase = s->rregs[ESP_RSTAT] & 7;

    return phase;
}
99 
/*
 * Enable or disable the external DMA engine. When enabling, run any
 * transfer callback that was deferred while DMA was off.
 */
void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (!level) {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
        return;
    }

    s->dma_enabled = 1;
    trace_esp_dma_enable();
    if (s->dma_cb) {
        /* Invoke the deferred callback, then clear it (original order kept) */
        s->dma_cb(s);
        s->dma_cb = NULL;
    }
}
114 
/* SCSI layer callback: the in-flight request was cancelled; drop our reference. */
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req != s->current_req) {
        return;
    }

    scsi_req_unref(s->current_req);
    s->current_req = NULL;
    s->current_dev = NULL;
    s->async_len = 0;
}
126 
/*
 * Recompute the DRQ line from the current phase and FIFO occupancy.
 * DRQ is only asserted for DMA requests, and only while the FIFO can
 * make progress (at least 2 bytes of space/data, matching the original
 * thresholds).
 */
static void esp_update_drq(ESPState *s)
{
    bool to_device;

    switch (esp_get_phase(s)) {
    case STAT_MO:
    case STAT_CD:
    case STAT_DO:
        /* Initiator -> target phases: data flows into the FIFO */
        to_device = true;
        break;

    case STAT_DI:
    case STAT_ST:
    case STAT_MI:
        /* Target -> initiator phases: data is drained from the FIFO */
        to_device = false;
        break;

    default:
        return;
    }

    if (!s->dma) {
        /* Not a DMA request */
        esp_lower_drq(s);
        return;
    }

    /* DMA request so update DRQ according to transfer direction */
    if (to_device ? fifo8_num_free(&s->fifo) < 2
                  : fifo8_num_used(&s->fifo) < 2) {
        esp_lower_drq(s);
    } else {
        esp_raise_drq(s);
    }
}
168 
/* Push one byte into the FIFO (tracing an overrun if full), then refresh DRQ. */
static void esp_fifo_push(ESPState *s, uint8_t val)
{
    if (fifo8_num_used(&s->fifo) < s->fifo.capacity) {
        fifo8_push(&s->fifo, val);
    } else {
        trace_esp_error_fifo_overrun();
    }

    esp_update_drq(s);
}
179 
/* Push a buffer of bytes into the FIFO, then refresh DRQ. */
static void esp_fifo_push_buf(ESPState *s, uint8_t *data, int n)
{
    fifo8_push_all(&s->fifo, data, n);
    esp_update_drq(s);
}
185 
/* Pop one byte from the FIFO (0 if empty), then refresh DRQ. */
static uint8_t esp_fifo_pop(ESPState *s)
{
    uint8_t val = fifo8_is_empty(&s->fifo) ? 0 : fifo8_pop(&s->fifo);

    esp_update_drq(s);
    return val;
}
199 
/* Pop up to maxlen bytes from the FIFO into dest; returns the count popped. */
static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
{
    uint32_t popped;

    popped = fifo8_pop_buf(&s->fifo, dest, maxlen);
    esp_update_drq(s);

    return popped;
}
207 
/* Read the 24-bit transfer counter from the TCLO/TCMID/TCHI read registers. */
static uint32_t esp_get_tc(ESPState *s)
{
    return (uint32_t)s->rregs[ESP_TCLO] |
           (uint32_t)(s->rregs[ESP_TCMID] << 8) |
           (uint32_t)(s->rregs[ESP_TCHI] << 16);
}
218 
/*
 * Write the 24-bit transfer counter; latch STAT_TC on the transition
 * from a non-zero count to zero.
 */
static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    bool tc_reached = (esp_get_tc(s) != 0) && (dmalen == 0);

    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;

    if (tc_reached) {
        s->rregs[ESP_RSTAT] |= STAT_TC;
    }
}
231 
/* Read the 24-bit starting transfer count from the write (latch) registers. */
static uint32_t esp_get_stc(ESPState *s)
{
    return (uint32_t)s->wregs[ESP_TCLO] |
           (uint32_t)(s->wregs[ESP_TCMID] << 8) |
           (uint32_t)(s->wregs[ESP_TCHI] << 16);
}
242 
/* PDMA read: fetch the next byte from the FIFO (DRQ is updated inside the pop). */
static uint8_t esp_pdma_read(ESPState *s)
{
    return esp_fifo_pop(s);
}
247 
/*
 * PDMA write: push one byte into the FIFO and, while DRQ is asserted,
 * decrement the transfer counter for each byte accepted.
 */
static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t tc = esp_get_tc(s);

    esp_fifo_push(s, val);

    if (tc && s->drq_state) {
        esp_set_tc(s, tc - 1);
    }
}
259 
/*
 * Begin selection of the target encoded in WBUSID. Returns 0 on success
 * with the controller in initiator mode, or -1 (raising a disconnect
 * interrupt) when no device exists at that target ID.
 */
static int esp_select(ESPState *s)
{
    int target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive: report disconnect */
        s->rregs[ESP_RSTAT] = 0;
        s->asc_mode = ESP_ASC_MODE_DIS;
        s->rregs[ESP_RINTR] = INTR_DC;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in esp_transfer_data() or esp_command_complete()
     */
    s->asc_mode = ESP_ASC_MODE_INI;
    return 0;
}
291 
292 static void esp_do_dma(ESPState *s);
293 static void esp_do_nodma(ESPState *s);
294 
/*
 * COMMAND phase: drain the accumulated CDB from cmdfifo, hand it to the
 * SCSI layer and, if the command transfers data, switch to the matching
 * DATA phase. The command completion interrupt is deferred until the
 * initial data transfer is done.
 */
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive: report disconnect */
        s->rregs[ESP_RSTAT] = 0;
        s->asc_mode = ESP_ASC_MODE_DIS;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    s->data_ready = false;

    if (datalen == 0) {
        return;
    }

    /*
     * Switch to DATA phase but wait until initial data xfer is
     * complete before raising the command completion interrupt
     */
    esp_set_phase(s, datalen > 0 ? STAT_DI : STAT_DO);
    scsi_req_continue(s->current_req);
}
339 
/*
 * MESSAGE OUT phase: consume the IDENTIFY byte (which selects the LUN)
 * and discard any remaining message bytes preceding the CDB.
 */
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t msg = fifo8_is_empty(&s->cmdfifo) ? 0 :
                      fifo8_pop(&s->cmdfifo);

        trace_esp_do_identify(msg);
        /* Low 3 bits of the IDENTIFY message carry the LUN */
        s->lun = msg & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int drop = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));

        fifo8_drop(&s->cmdfifo, drop);
        s->cmdfifo_cdb_offset = 0;
    }
}
358 
/*
 * Process a fully-received command: consume the MESSAGE OUT bytes first
 * (IDENTIFY + any ignored extended messages), then execute the CDB that
 * remains in cmdfifo.
 */
static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
365 
/* SELECT with ATN: select the target and start in MESSAGE OUT phase. */
static void handle_satn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        /* Defer until the DMA engine is enabled */
        s->dma_cb = handle_satn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);

    /* Dispatch to the DMA or non-DMA transfer path */
    (s->dma ? esp_do_dma : esp_do_nodma)(s);
}
385 
/* SELECT without ATN: select the target and go straight to COMMAND phase. */
static void handle_s_without_atn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        /* Defer until the DMA engine is enabled */
        s->dma_cb = handle_s_without_atn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_CD);
    s->cmdfifo_cdb_offset = 0;

    /* Dispatch to the DMA or non-DMA transfer path */
    (s->dma ? esp_do_dma : esp_do_nodma)(s);
}
406 
/* SELECT with ATN and stop: select the target, stopping in MESSAGE OUT phase. */
static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        /* Defer until the DMA engine is enabled */
        s->dma_cb = handle_satn_stop;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);
    s->cmdfifo_cdb_offset = 0;

    /* Dispatch to the DMA or non-DMA transfer path */
    (s->dma ? esp_do_dma : esp_do_nodma)(s);
}
427 
/* TRANSFER PAD command: run the appropriate transfer path for padding. */
static void handle_pad(ESPState *s)
{
    (s->dma ? esp_do_dma : esp_do_nodma)(s);
}
436 
/* ICCS: send the status byte and message byte back to the initiator. */
static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);

    (s->dma ? esp_do_dma : esp_do_nodma)(s);
}
447 
/*
 * Return true once cmdfifo holds a complete CDB beyond the message bytes
 * at cmdfifo_cdb_offset, i.e. enough bytes for the length implied by the
 * CDB opcode.
 */
static bool esp_cdb_ready(ESPState *s)
{
    int len = fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset;
    const uint8_t *pbuf;
    uint32_t n;
    int cdblen;

    if (len <= 0) {
        return false;
    }

    pbuf = fifo8_peek_bufptr(&s->cmdfifo, len, &n);
    if (n < len) {
        /*
         * In normal use the cmdfifo should never wrap, but include this check
         * to prevent a malicious guest from reading past the end of the
         * cmdfifo data buffer below
         */
        return false;
    }

    cdblen = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);

    /* Unknown opcode (negative length) means the CDB is not ready */
    return cdblen >= 0 && len >= cdblen;
}
473 
/*
 * Raise a bus service interrupt once the transfer counter has expired
 * and the FIFO is (nearly) drained.
 */
static void esp_dma_ti_check(ESPState *s)
{
    if (esp_get_tc(s) != 0 || fifo8_num_used(&s->fifo) >= 2) {
        return;
    }
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
481 
/*
 * Advance the current DMA transfer according to the bus phase held in
 * RSTAT and the command latched in ESP_CMD.
 *
 * Each phase either moves data through the external DMA engine
 * (dma_memory_read/dma_memory_write, decrementing the 24-bit transfer
 * counter) or, when no DMA engine callback is wired up, through the
 * 16-byte chip FIFO (PDMA). Interrupts are raised as each phase step
 * completes; recursive calls continue processing when more data is
 * already available.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    /* len starts as the remaining transfer count and is clamped per phase */
    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        /* MESSAGE OUT: accumulate message bytes into cmdfifo */
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            if (len) {
                s->dma_memory_read(s->dma_opaque, buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            }
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        /* COMMAND: accumulate CDB bytes into cmdfifo, execute at TC == 0 */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            if (len) {
                s->dma_memory_read(s->dma_opaque, buf, len);
                fifo8_push_all(&s->cmdfifo, buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            }
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;

    case STAT_DO:
        /* DATA OUT: move guest data into the SCSI layer's async buffer */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                if (len) {
                    s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                    esp_set_tc(s, esp_get_tc(s) - len);
                }
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        /* DATA IN: move SCSI layer data out to the guest */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                if (len) {
                    s->dma_memory_write(s->dma_opaque, s->async_buf, len);
                }
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                esp_fifo_push_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        /* STATUS: deliver the status byte, then switch to MESSAGE IN */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    /* Length already non-zero */
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        /* MESSAGE IN: deliver the final (zero) message byte and complete */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    /* Length already non-zero */
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}
741 
/*
 * Non-DMA TI in DATA OUT phase: drain the FIFO into the SCSI layer's
 * async buffer, continuing the request when the buffer is full.
 */
static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req || s->async_len == 0) {
        /* No request, or defer until data is available */
        return;
    }

    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(s, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        /* Buffer consumed: hand control back to the SCSI layer */
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
768 
/*
 * Advance a non-DMA (programmed I/O) transfer according to the bus phase
 * in RSTAT and the command latched in ESP_CMD. Data moves one FIFO-load
 * at a time between the 16-byte chip FIFO, the command FIFO (cmdfifo)
 * and the SCSI layer's async buffer; completion of each step raises the
 * appropriate interrupt.
 */
static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        /* MESSAGE OUT: gather message bytes into cmdfifo */
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf,
                                   MIN(fifo8_num_used(&s->fifo), 1));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        /* COMMAND: gather CDB bytes; execute once a full CDB is present */
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;

        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contain entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        /* DATA IN: feed the SCSI layer's buffer to the guest one byte at a time */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0) {
            /* Defer until data is available.  */
            return;
        }
        if (fifo8_is_empty(&s->fifo)) {
            esp_fifo_push(s, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        /* STATUS: push the status byte and move on to MESSAGE IN */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        /* MESSAGE IN: push the final (zero) message byte and complete */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}
944 
/*
 * SCSI layer callback: the current request has finished. Record the
 * status byte, switch the bus to STATUS phase, raise the appropriate
 * completion interrupts and drop the request reference.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if ((s->dma || to_device) && s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */
    s->ti_size = 0;

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        /* Terminate any in-progress TI command */
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
1004 
/*
 * SCSI layer callback: the next chunk of request data (len bytes) is
 * available in the request buffer. On the first callback of a command,
 * raise the completion interrupt that was deferred by esp_select() /
 * do_command_phase(); then, if a TI command is pending, continue the
 * transfer via the DMA or non-DMA path.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
             s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
             s->rregs[ESP_RSEQ] = SEQ_CD;
             esp_raise_irq(s);
             break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
             s->rregs[ESP_RINTR] |= INTR_BS;
             s->rregs[ESP_RSEQ] = SEQ_MO;
             esp_raise_irq(s);
             break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * If the final COMMAND phase data was transferred using a TI
             * command, clear ESP_CMD to terminate the TI command and raise
             * the completion interrupt
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */

    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}
1073 
/*
 * Execute a Transfer Information (TI) command, dispatching to the DMA or
 * non-DMA transfer path. If DMA is requested but not yet enabled, the
 * command is deferred until esp_dma_enable() fires the callback.
 */
static void handle_ti(ESPState *s)
{
    if (s->dma) {
        if (!s->dma_enabled) {
            /* DMA not yet enabled: re-run this command once it is */
            s->dma_cb = handle_ti;
            return;
        }
        trace_esp_handle_ti(esp_get_tc(s));
        esp_do_dma(s);
        return;
    }

    trace_esp_handle_ti(s->ti_size);
    esp_do_nodma(s);

    if (esp_get_phase(s) == STAT_DO) {
        esp_nodma_ti_dataout(s);
    }
}
1096 
/*
 * Hard (power-on) reset: clear all registers, FIFOs and transfer state
 * and return the chip to disconnected mode.
 */
void esp_hard_reset(ESPState *s)
{
    /* Clear both register files */
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);

    /* Reset transfer and DMA state */
    s->ti_size = 0;
    s->async_len = 0;
    s->tchi_written = 0;
    s->dma = 0;
    s->dma_cb = NULL;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->asc_mode = ESP_ASC_MODE_DIS;

    s->rregs[ESP_CFG1] = 7;
}
1112 
/* Soft reset: drop both outgoing IRQ lines, then perform a full reset */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->drq_irq);
    esp_hard_reset(s);
}
1119 
/* Reset every device on the attached SCSI bus (SCSI bus reset) */
static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}
1124 
/* GPIO handler for the parent device's reset line: soft reset on assert */
static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (!level) {
        return;
    }
    esp_soft_reset(s);
}
1131 
esp_cmd_is_valid(ESPState * s,uint8_t cmd)1132 static bool esp_cmd_is_valid(ESPState *s, uint8_t cmd)
1133 {
1134     uint8_t cmd_group = (cmd & CMD_GRP_MASK) >> 4;
1135 
1136     /* Always allow misc commands */
1137     if (cmd_group == CMD_GRP_MISC) {
1138         return true;
1139     }
1140 
1141     switch (s->asc_mode) {
1142     case ESP_ASC_MODE_DIS:
1143         /* Disconnected mode: only allow disconnected commands */
1144         if (cmd_group == CMD_GRP_DISC) {
1145             return true;
1146         }
1147         break;
1148 
1149     case ESP_ASC_MODE_INI:
1150         /* Initiator mode: allow initiator commands */
1151         if (cmd_group == CMD_GRP_INIT) {
1152             return true;
1153         }
1154         break;
1155 
1156     default:
1157         g_assert_not_reached();
1158     }
1159 
1160     trace_esp_invalid_cmd(cmd, s->asc_mode);
1161     return false;
1162 }
1163 
/*
 * Decode and execute the command latched in ESP_CMD.
 *
 * The CMD_DMA bit selects DMA mode and reloads the transfer counter from
 * the start-transfer-count registers (a programmed value of 0 means the
 * maximum of 0x10000). The low bits dispatch to the individual command
 * handlers.
 */
static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter.  */
        if (esp_get_stc(s) == 0) {
            /* STC of zero means the full 64K count */
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        /* Flush FIFO: discard any buffered bytes */
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        /* Report the reset unless interrupt-on-reset is disabled in CFG1 */
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        /* Initiator command complete sequence: send status + message */
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        break;
    case CMD_MSGACC:
        /* Message accepted: return to disconnected mode */
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->asc_mode = ESP_ASC_MODE_DIS;
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}
1251 
/*
 * Read one ESP register.
 *
 * Some reads have side effects: reading the FIFO pops a byte, and reading
 * RINTR clears the interrupt state (interrupt register, sequence step and
 * most status bits) and lowers the IRQ line.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        /* Reading the FIFO register pops the next byte */
        s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        esp_lower_irq(s);
        /* Keep TC and the current phase (low 3 bits) in RSTAT */
        s->rregs[ESP_RSTAT] &= STAT_TC | 7;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
     case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
1300 
/*
 * Write one ESP register.
 *
 * Writing the transfer counter clears the TC status bit; writing the FIFO
 * pushes a byte and kicks the non-DMA transfer path; writing ESP_CMD
 * validates the command against the current ASC mode and runs it. The
 * written value is mirrored into wregs for all accepted registers.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Loading the transfer count clears the terminal count flag */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(s, val);
        }
        esp_do_nodma(s);
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (!esp_cmd_is_valid(s, s->rregs[saddr])) {
            /*
             * Latch the Illegal Command bit in the interrupt status
             * register: per the NCR53C9x datasheet INTR_IL lives in
             * RINTR, not RSTAT (where bit 0x40 is Gross Error, which
             * the previous code set by mistake).
             */
            s->rregs[ESP_RINTR] |= INTR_IL;
            esp_raise_irq(s);
            break;
        }
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        /* Write-only configuration registers: stored in wregs below */
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Config registers read back the written value */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
1342 
/* Accept byte accesses always; 32-bit accesses only for writes */
static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    if (size == 1) {
        return true;
    }
    return is_write && size == 4;
}
1349 
/* Migration predicate: true for stream format versions before 5 */
static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    /* Clamp to the version recorded by the parent device's pre_save */
    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}
1357 
/* Migration predicate: true for stream format versions 5 and later */
static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}
1365 
/* Migration predicate: true for stream format versions 6 and later */
static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}
1373 
/* Migration predicate: true for stream format versions 5 and 6 only */
static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5 && version_id <= 6;
}
1381 
/* Migration predicate: true for stream format versions 8 and later */
static bool esp_is_version_8(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 8;
}
1389 
esp_pre_save(void * opaque)1390 int esp_pre_save(void *opaque)
1391 {
1392     ESPState *s = ESP(object_resolve_path_component(
1393                       OBJECT(opaque), "esp"));
1394 
1395     s->mig_version_id = vmstate_esp.version_id;
1396     return 0;
1397 }
1398 
esp_post_load(void * opaque,int version_id)1399 static int esp_post_load(void *opaque, int version_id)
1400 {
1401     ESPState *s = ESP(opaque);
1402     int len, i;
1403 
1404     version_id = MIN(version_id, s->mig_version_id);
1405 
1406     if (version_id < 5) {
1407         esp_set_tc(s, s->mig_dma_left);
1408 
1409         /* Migrate ti_buf to fifo */
1410         len = s->mig_ti_wptr - s->mig_ti_rptr;
1411         for (i = 0; i < len; i++) {
1412             fifo8_push(&s->fifo, s->mig_ti_buf[i]);
1413         }
1414 
1415         /* Migrate cmdbuf to cmdfifo */
1416         for (i = 0; i < s->mig_cmdlen; i++) {
1417             fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
1418         }
1419     }
1420 
1421     if (version_id < 8) {
1422         /* Assume initiator mode to allow all commands to continue */
1423         s->asc_mode = ESP_ASC_MODE_INI;
1424     }
1425 
1426     s->mig_version_id = vmstate_esp.version_id;
1427     return 0;
1428 }
1429 
/*
 * Migration description for the core ESP state. mig_* fields exist only
 * to carry pre-v5 stream formats and are converted in esp_post_load().
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 8,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        /* Legacy (pre-v5) ti buffer; migrated into s->fifo on load */
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /* Legacy cmdbuf: 16 bytes up to v3, full buffer from v4 */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        /* Current (v5+) FIFO-based representation */
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_UINT8_TEST(asc_mode, ESPState, esp_is_version_8),
        VMSTATE_END_OF_LIST()
    },
};
1468 
/* MMIO write: registers are spaced 1 << it_shift bytes apart */
static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    uint32_t saddr = addr >> sysbus->it_shift;

    esp_reg_write(ESP(&sysbus->esp), saddr, val);
}
1479 
sysbus_esp_mem_read(void * opaque,hwaddr addr,unsigned int size)1480 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
1481                                     unsigned int size)
1482 {
1483     SysBusESPState *sysbus = opaque;
1484     ESPState *s = ESP(&sysbus->esp);
1485     uint32_t saddr;
1486 
1487     saddr = addr >> sysbus->it_shift;
1488     return esp_reg_read(s, saddr);
1489 }
1490 
/* Register window; access sizes restricted by esp_mem_accepts() */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1497 
/*
 * PDMA (pseudo-DMA) write: feed 1 or 2 bytes into the transfer, high
 * byte first for 16-bit accesses, then advance the DMA state machine.
 * impl.max_access_size = 2 limits accesses to these two sizes.
 */
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        /* Big-endian byte order: high byte first */
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    esp_do_dma(s);
}
1517 
/*
 * PDMA (pseudo-DMA) read: pull 1 or 2 bytes from the transfer, high
 * byte first for 16-bit accesses, then advance the DMA state machine.
 * impl.max_access_size = 2 limits accesses to these two sizes.
 */
static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        /* Big-endian byte order: first byte read lands in the high byte */
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    esp_do_dma(s);
    return val;
}
1539 
/*
 * SCSI bus load_request hook: re-attach an in-flight request after
 * migration. Takes a reference which is dropped on command completion.
 */
static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}
1548 
/*
 * PDMA window: guests may issue 1-4 byte accesses, but the
 * implementation handles at most 2 bytes at a time (the core splits
 * larger accesses).
 */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1558 
/* SCSI bus callbacks: no tagged queueing on this controller */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1569 
sysbus_esp_gpio_demux(void * opaque,int irq,int level)1570 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1571 {
1572     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1573     ESPState *s = ESP(&sysbus->esp);
1574 
1575     switch (irq) {
1576     case 0:
1577         parent_esp_reset(s, irq, level);
1578         break;
1579     case 1:
1580         esp_dma_enable(s, irq, level);
1581         break;
1582     }
1583 }
1584 
/*
 * Realize the sysbus wrapper: realize the inner ESP device, export its
 * IRQ/DRQ lines, map the register and PDMA MMIO windows, wire the two
 * input GPIOs and create the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->drq_irq);
    /* Board code must set it_shift before realize */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    /* Register window scaled by the board's register spacing */
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    /* GPIO 0: reset, GPIO 1: DMA enable (see sysbus_esp_gpio_demux) */
    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}
1611 
/* Device reset handler: delegate to the common ESP hard reset */
static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);

    esp_hard_reset(ESP(&sysbus->esp));
}
1619 
/* Instance init: embed the core ESP device as a child named "esp" */
static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}
1626 
/*
 * Migration description for the sysbus wrapper; esp.mig_version_id
 * (saved since v2) lets load-time predicates adapt to older streams.
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1638 
/* Class init for the sysbus wrapper device */
static void sysbus_esp_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    device_class_set_legacy_reset(dc, sysbus_esp_hard_reset);
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1648 
esp_finalize(Object * obj)1649 static void esp_finalize(Object *obj)
1650 {
1651     ESPState *s = ESP(obj);
1652 
1653     fifo8_destroy(&s->fifo);
1654     fifo8_destroy(&s->cmdfifo);
1655 }
1656 
esp_init(Object * obj)1657 static void esp_init(Object *obj)
1658 {
1659     ESPState *s = ESP(obj);
1660 
1661     fifo8_create(&s->fifo, ESP_FIFO_SZ);
1662     fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
1663 }
1664 
/* Class init for the core ESP device */
static void esp_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1673 
/* QOM type registration: the sysbus wrapper and the embedded core device */
static const TypeInfo esp_info_types[] = {
    {
        .name          = TYPE_SYSBUS_ESP,
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init    = sysbus_esp_class_init,
    },
    {
        .name = TYPE_ESP,
        .parent = TYPE_DEVICE,
        .instance_init = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size = sizeof(ESPState),
        .class_init = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)
1693