xref: /openbmc/qemu/hw/scsi/esp.c (revision 28a579a349015a7ed5a57cb4bdcdc5c60ba6b6fc)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  * Copyright (c) 2023 Mark Cave-Ayland
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a copy
9  * of this software and associated documentation files (the "Software"), to deal
10  * in the Software without restriction, including without limitation the rights
11  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12  * copies of the Software, and to permit persons to whom the Software is
13  * furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included in
16  * all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24  * THE SOFTWARE.
25  */
26 
27 #include "qemu/osdep.h"
28 #include "hw/sysbus.h"
29 #include "migration/vmstate.h"
30 #include "hw/irq.h"
31 #include "hw/scsi/esp.h"
32 #include "trace.h"
33 #include "qemu/log.h"
34 #include "qemu/module.h"
35 
36 /*
37  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
38  * also produced as NCR89C100. See
39  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
40  * and
41  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
42  *
43  * On Macintosh Quadra it is a NCR53C96.
44  */
45 
46 static void esp_raise_irq(ESPState *s)
47 {
48     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
49         s->rregs[ESP_RSTAT] |= STAT_INT;
50         qemu_irq_raise(s->irq);
51         trace_esp_raise_irq();
52     }
53 }
54 
55 static void esp_lower_irq(ESPState *s)
56 {
57     if (s->rregs[ESP_RSTAT] & STAT_INT) {
58         s->rregs[ESP_RSTAT] &= ~STAT_INT;
59         qemu_irq_lower(s->irq);
60         trace_esp_lower_irq();
61     }
62 }
63 
64 static void esp_raise_drq(ESPState *s)
65 {
66     if (!(s->drq_state)) {
67         qemu_irq_raise(s->drq_irq);
68         trace_esp_raise_drq();
69         s->drq_state = true;
70     }
71 }
72 
73 static void esp_lower_drq(ESPState *s)
74 {
75     if (s->drq_state) {
76         qemu_irq_lower(s->drq_irq);
77         trace_esp_lower_drq();
78         s->drq_state = false;
79     }
80 }
81 
/*
 * SCSI bus phase names indexed by the 3-bit phase value kept in the low
 * bits of RSTAT (see esp_get_phase()); used only for tracing.
 */
static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};
86 
87 static void esp_set_phase(ESPState *s, uint8_t phase)
88 {
89     s->rregs[ESP_RSTAT] &= ~7;
90     s->rregs[ESP_RSTAT] |= phase;
91 
92     trace_esp_set_phase(esp_phase_names[phase]);
93 }
94 
95 static uint8_t esp_get_phase(ESPState *s)
96 {
97     return s->rregs[ESP_RSTAT] & 7;
98 }
99 
100 void esp_dma_enable(ESPState *s, int irq, int level)
101 {
102     if (level) {
103         s->dma_enabled = 1;
104         trace_esp_dma_enable();
105         if (s->dma_cb) {
106             s->dma_cb(s);
107             s->dma_cb = NULL;
108         }
109     } else {
110         trace_esp_dma_disable();
111         s->dma_enabled = 0;
112     }
113 }
114 
115 void esp_request_cancelled(SCSIRequest *req)
116 {
117     ESPState *s = req->hba_private;
118 
119     if (req == s->current_req) {
120         scsi_req_unref(s->current_req);
121         s->current_req = NULL;
122         s->current_dev = NULL;
123         s->async_len = 0;
124     }
125 }
126 
/*
 * Recompute the DRQ line level from the current bus phase and FIFO
 * occupancy: for DMA requests DRQ stays asserted while the FIFO has room
 * (transfers to the device) or data (transfers from the device); non-DMA
 * requests never assert DRQ.
 */
static void esp_update_drq(ESPState *s)
{
    bool to_device;

    switch (esp_get_phase(s)) {
    case STAT_MO:
    case STAT_CD:
    case STAT_DO:
        /* Initiator -> target phases: bytes flow towards the device */
        to_device = true;
        break;

    case STAT_DI:
    case STAT_ST:
    case STAT_MI:
        /* Target -> initiator phases: bytes flow from the device */
        to_device = false;
        break;

    default:
        /* Reserved phase values: leave DRQ unchanged */
        return;
    }

    if (s->dma) {
        /* DMA request so update DRQ according to transfer direction */
        if (to_device) {
            /* Deassert once fewer than 2 bytes of FIFO space remain */
            if (fifo8_num_free(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        } else {
            /* Deassert once fewer than 2 bytes are left to read */
            if (fifo8_num_used(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        }
    } else {
        /* Not a DMA request */
        esp_lower_drq(s);
    }
}
168 
169 static void esp_fifo_push(ESPState *s, uint8_t val)
170 {
171     if (fifo8_num_used(&s->fifo) == s->fifo.capacity) {
172         trace_esp_error_fifo_overrun();
173     } else {
174         fifo8_push(&s->fifo, val);
175     }
176 
177     esp_update_drq(s);
178 }
179 
/*
 * Push a buffer of len bytes into the main FIFO and recompute the DRQ line
 * state. Callers in this file clamp len against fifo8_num_free() first.
 */
static void esp_fifo_push_buf(ESPState *s, uint8_t *buf, int len)
{
    fifo8_push_all(&s->fifo, buf, len);
    esp_update_drq(s);
}
185 
186 static uint8_t esp_fifo_pop(ESPState *s)
187 {
188     uint8_t val;
189 
190     if (fifo8_is_empty(&s->fifo)) {
191         val = 0;
192     } else {
193         val = fifo8_pop(&s->fifo);
194     }
195 
196     esp_update_drq(s);
197     return val;
198 }
199 
/*
 * Pop up to maxlen bytes from the main FIFO into dest, recompute the DRQ
 * line state, and return the number of bytes actually transferred.
 */
static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
{
    uint32_t len = fifo8_pop_buf(&s->fifo, dest, maxlen);

    esp_update_drq(s);
    return len;
}
207 
208 static uint32_t esp_get_tc(ESPState *s)
209 {
210     uint32_t dmalen;
211 
212     dmalen = s->rregs[ESP_TCLO];
213     dmalen |= s->rregs[ESP_TCMID] << 8;
214     dmalen |= s->rregs[ESP_TCHI] << 16;
215 
216     return dmalen;
217 }
218 
219 static void esp_set_tc(ESPState *s, uint32_t dmalen)
220 {
221     uint32_t old_tc = esp_get_tc(s);
222 
223     s->rregs[ESP_TCLO] = dmalen;
224     s->rregs[ESP_TCMID] = dmalen >> 8;
225     s->rregs[ESP_TCHI] = dmalen >> 16;
226 
227     if (old_tc && dmalen == 0) {
228         s->rregs[ESP_RSTAT] |= STAT_TC;
229     }
230 }
231 
232 static uint32_t esp_get_stc(ESPState *s)
233 {
234     uint32_t dmalen;
235 
236     dmalen = s->wregs[ESP_TCLO];
237     dmalen |= s->wregs[ESP_TCMID] << 8;
238     dmalen |= s->wregs[ESP_TCHI] << 16;
239 
240     return dmalen;
241 }
242 
/* PDMA interface: read one byte from the main FIFO (updates DRQ) */
static uint8_t esp_pdma_read(ESPState *s)
{
    return esp_fifo_pop(s);
}
247 
248 static void esp_pdma_write(ESPState *s, uint8_t val)
249 {
250     uint32_t dmalen = esp_get_tc(s);
251 
252     esp_fifo_push(s, val);
253 
254     if (dmalen && s->drq_state) {
255         dmalen--;
256         esp_set_tc(s, dmalen);
257     }
258 }
259 
/*
 * Begin target selection for the ID programmed into WBUSID.
 *
 * Resets the transfer size and sequence step, cancels any command still in
 * flight, then looks up the target device. Returns 0 on success; on a
 * missing target returns -1 and raises a disconnect (INTR_DC) interrupt.
 */
static int esp_select(ESPState *s)
{
    int target;

    /* Destination ID comes from the low bits of the bus ID register */
    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in esp_transfer_data() or esp_command_complete()
     */
    return 0;
}
289 
290 static void esp_do_dma(ESPState *s);
291 static void esp_do_nodma(ESPState *s);
292 
/*
 * Dispatch the CDB accumulated in cmdfifo to the SCSI layer.
 *
 * Drains cmdfifo, looks up the LUN chosen by the earlier IDENTIFY message
 * (see do_message_phase()) and enqueues the request. If the command
 * transfers data, switch to the appropriate DATA phase and start the
 * transfer; the completion interrupt is deferred until the initial data
 * transfer is done.
 */
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        /* Nothing to dispatch, or selection failed earlier */
        return;
    }
    fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    /* datalen > 0 selects DATA IN below; datalen < 0 selects DATA OUT */
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    s->data_ready = false;
    if (datalen != 0) {
        /*
         * Switch to DATA phase but wait until initial data xfer is
         * complete before raising the command completion interrupt
         */
        if (datalen > 0) {
            esp_set_phase(s, STAT_DI);
        } else {
            esp_set_phase(s, STAT_DO);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
336 
/*
 * Consume the MESSAGE OUT bytes that precede the CDB in cmdfifo.
 *
 * The first byte is treated as an IDENTIFY message whose low 3 bits select
 * the LUN; any remaining pre-CDB bytes (extended messages) are dropped.
 */
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        /* An empty cmdfifo yields a zero message byte */
        uint8_t message = fifo8_is_empty(&s->cmdfifo) ? 0 :
                          fifo8_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        fifo8_drop(&s->cmdfifo, len);
        s->cmdfifo_cdb_offset = 0;
    }
}
355 
/*
 * Process a complete command sequence held in cmdfifo: strip the message
 * phase bytes, then hand the remaining CDB to the SCSI layer.
 */
static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    /* do_message_phase() must have consumed all pre-CDB bytes */
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
362 
363 static void handle_satn(ESPState *s)
364 {
365     if (s->dma && !s->dma_enabled) {
366         s->dma_cb = handle_satn;
367         return;
368     }
369 
370     if (esp_select(s) < 0) {
371         return;
372     }
373 
374     esp_set_phase(s, STAT_MO);
375 
376     if (s->dma) {
377         esp_do_dma(s);
378     } else {
379         esp_do_nodma(s);
380     }
381 }
382 
383 static void handle_s_without_atn(ESPState *s)
384 {
385     if (s->dma && !s->dma_enabled) {
386         s->dma_cb = handle_s_without_atn;
387         return;
388     }
389 
390     if (esp_select(s) < 0) {
391         return;
392     }
393 
394     esp_set_phase(s, STAT_CD);
395     s->cmdfifo_cdb_offset = 0;
396 
397     if (s->dma) {
398         esp_do_dma(s);
399     } else {
400         esp_do_nodma(s);
401     }
402 }
403 
404 static void handle_satn_stop(ESPState *s)
405 {
406     if (s->dma && !s->dma_enabled) {
407         s->dma_cb = handle_satn_stop;
408         return;
409     }
410 
411     if (esp_select(s) < 0) {
412         return;
413     }
414 
415     esp_set_phase(s, STAT_MO);
416     s->cmdfifo_cdb_offset = 0;
417 
418     if (s->dma) {
419         esp_do_dma(s);
420     } else {
421         esp_do_nodma(s);
422     }
423 }
424 
425 static void handle_pad(ESPState *s)
426 {
427     if (s->dma) {
428         esp_do_dma(s);
429     } else {
430         esp_do_nodma(s);
431     }
432 }
433 
434 static void write_response(ESPState *s)
435 {
436     trace_esp_write_response(s->status);
437 
438     if (s->dma) {
439         esp_do_dma(s);
440     } else {
441         esp_do_nodma(s);
442     }
443 }
444 
/*
 * Return true once cmdfifo holds a complete CDB after the message phase
 * bytes, i.e. enough bytes for scsi_cdb_length() to determine the CDB
 * length and for the whole CDB to be present.
 */
static bool esp_cdb_ready(ESPState *s)
{
    /* Bytes available beyond the message-phase prefix */
    int len = fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset;
    const uint8_t *pbuf;
    uint32_t n;
    int cdblen;

    if (len <= 0) {
        return false;
    }

    pbuf = fifo8_peek_bufptr(&s->cmdfifo, len, &n);
    if (n < len) {
        /*
         * In normal use the cmdfifo should never wrap, but include this check
         * to prevent a malicious guest from reading past the end of the
         * cmdfifo data buffer below
         */
        return false;
    }

    /* A negative result means the CDB length could not be determined */
    cdblen = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);

    return cdblen < 0 ? false : (len >= cdblen);
}
470 
471 static void esp_dma_ti_check(ESPState *s)
472 {
473     if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) {
474         s->rregs[ESP_RINTR] |= INTR_BS;
475         esp_raise_irq(s);
476     }
477 }
478 
/*
 * Perform (or continue) a DMA transfer for the current SCSI bus phase.
 *
 * MESSAGE OUT and COMMAND phase bytes are accumulated into cmdfifo (either
 * via the board's dma_memory_read helper or, when absent, from the FIFO in
 * PDMA mode). DATA OUT/IN move bytes between the SCSI layer's async buffer
 * and DMA memory or the FIFO. STATUS/MESSAGE IN emit the status byte and a
 * zero message byte for the CMD_ICCS command.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    /* Remaining transfer count requested by the guest */
    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        if (s->dma_memory_read) {
            /* Pull message bytes straight from DMA memory into cmdfifo */
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            if (len) {
                s->dma_memory_read(s->dma_opaque, buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            }
        } else {
            /* PDMA: drain whatever the guest pushed into the FIFO */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            /* Pull CDB bytes straight from DMA memory into cmdfifo */
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            if (len) {
                s->dma_memory_read(s->dma_opaque, buf, len);
                fifo8_push_all(&s->cmdfifo, buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            }
        } else {
            /* PDMA: drain whatever the guest pushed into the FIFO */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;

    case STAT_DO:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available.  */
            return;
        }
        /* Never transfer more than the SCSI layer's current buffer holds */
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                if (len) {
                    s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                    esp_set_tc(s, esp_get_tc(s) - len);
                }
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available.  */
            return;
        }
        /* Never transfer more than the SCSI layer's current buffer holds */
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                if (len) {
                    s->dma_memory_write(s->dma_opaque, s->async_buf, len);
                }
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                esp_fifo_push_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            /* ICCS transfers exactly one status byte */
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    /* Length already non-zero */
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            /* ICCS transfers exactly one (zero) message byte */
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    /* Length already non-zero */
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}
738 
/*
 * Non-DMA TI in DATA OUT phase: move bytes accumulated in the FIFO into
 * the SCSI layer's async buffer. Hands control back to the SCSI layer once
 * the current buffer is full, otherwise raises a bus service interrupt to
 * request more data.
 */
static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    /* Bounded by FIFO size, FIFO contents and remaining buffer space */
    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(s, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        /* Current SCSI buffer consumed: let the SCSI layer continue */
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
765 
/*
 * Perform (or continue) a non-DMA (programmed I/O) transfer for the
 * current SCSI bus phase.
 *
 * MESSAGE OUT and COMMAND phase bytes are copied from the FIFO into
 * cmdfifo until a full CDB is available, DATA IN feeds the SCSI layer's
 * buffer into the FIFO one byte at a time, and STATUS/MESSAGE IN emit the
 * status byte and a zero message byte for the CMD_ICCS command.
 */
static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf,
                                   MIN(fifo8_num_used(&s->fifo), 1));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;

        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contain entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0) {
            /* Defer until data is available.  */
            return;
        }
        if (fifo8_is_empty(&s->fifo)) {
            /* One byte at a time: next byte of the SCSI buffer into FIFO */
            esp_fifo_push(s, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            /* Current SCSI buffer consumed: let the SCSI layer continue */
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            /* Emit the status byte, then move to MESSAGE IN phase */
            esp_fifo_push(s, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            /* Message byte is always zero here */
            esp_fifo_push(s, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}
941 
/*
 * SCSI layer callback: the current request has finished.
 *
 * Records the status byte, switches the bus to STATUS phase, raises the
 * interrupts appropriate to the command that was in progress and drops the
 * reference to the completed request.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            /* Transfer finished with residual bytes outstanding */
            trace_esp_command_complete_unexpected();
        }
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */
    s->ti_size = 0;

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        /* Terminate the in-flight TRANSFER INFORMATION command */
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
1001 
/*
 * SCSI layer callback: a new chunk of request data is available.
 *
 * Stores the SCSI layer's buffer and length, raises the interrupt that was
 * deferred while the initial data transfer completed (which one depends on
 * the command that started the request), then resumes any in-progress TI
 * transfer.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        /* First chunk for this request: raise the deferred interrupt */
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
             s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
             s->rregs[ESP_RSEQ] = SEQ_CD;
             esp_raise_irq(s);
             break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
             s->rregs[ESP_RINTR] |= INTR_BS;
             s->rregs[ESP_RSEQ] = SEQ_MO;
             esp_raise_irq(s);
             break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * If the final COMMAND phase data was transferred using a TI
             * command, clear ESP_CMD to terminate the TI command and raise
             * the completion interrupt
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */

    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}
1070 
1071 static void handle_ti(ESPState *s)
1072 {
1073     uint32_t dmalen;
1074 
1075     if (s->dma && !s->dma_enabled) {
1076         s->dma_cb = handle_ti;
1077         return;
1078     }
1079 
1080     if (s->dma) {
1081         dmalen = esp_get_tc(s);
1082         trace_esp_handle_ti(dmalen);
1083         esp_do_dma(s);
1084     } else {
1085         trace_esp_handle_ti(s->ti_size);
1086         esp_do_nodma(s);
1087 
1088         if (esp_get_phase(s) == STAT_DO) {
1089             esp_nodma_ti_dataout(s);
1090         }
1091     }
1092 }
1093 
1094 void esp_hard_reset(ESPState *s)
1095 {
1096     memset(s->rregs, 0, ESP_REGS);
1097     memset(s->wregs, 0, ESP_REGS);
1098     s->tchi_written = 0;
1099     s->ti_size = 0;
1100     s->async_len = 0;
1101     fifo8_reset(&s->fifo);
1102     fifo8_reset(&s->cmdfifo);
1103     s->dma = 0;
1104     s->dma_cb = NULL;
1105 
1106     s->rregs[ESP_CFG1] = 7;
1107 }
1108 
/* Soft (chip) reset: drop both outgoing IRQ lines, then reset all state */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->drq_irq);
    esp_hard_reset(s);
}
1115 
/* Reset every device attached to the emulated SCSI bus */
static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}
1120 
1121 static void parent_esp_reset(ESPState *s, int irq, int level)
1122 {
1123     if (level) {
1124         esp_soft_reset(s);
1125     }
1126 }
1127 
/*
 * Execute the command just latched into ESP_CMD.  The CMD_DMA flag selects
 * DMA mode and reloads the transfer counter; the low bits select the
 * operation itself.
 */
static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter.  */
        if (esp_get_stc(s) == 0) {
            /* A start transfer count of zero means the maximum (64K) */
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        /* Discard all bytes currently held in the FIFO */
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        /* Raise the reset interrupt unless reset reporting is disabled */
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        /* Initiator command complete steps: send status + message */
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        break;
    case CMD_MSGACC:
        /* Message accepted: signal disconnect and clear sequence/flags */
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        /* Enable selection: clears any pending interrupt status */
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}
1214 
1215 uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
1216 {
1217     uint32_t val;
1218 
1219     switch (saddr) {
1220     case ESP_FIFO:
1221         s->rregs[ESP_FIFO] = esp_fifo_pop(s);
1222         val = s->rregs[ESP_FIFO];
1223         break;
1224     case ESP_RINTR:
1225         /*
1226          * Clear sequence step, interrupt register and all status bits
1227          * except TC
1228          */
1229         val = s->rregs[ESP_RINTR];
1230         s->rregs[ESP_RINTR] = 0;
1231         esp_lower_irq(s);
1232         s->rregs[ESP_RSTAT] &= STAT_TC | 7;
1233         /*
1234          * According to the datasheet ESP_RSEQ should be cleared, but as the
1235          * emulation currently defers information transfers to the next TI
1236          * command leave it for now so that pedantic guests such as the old
1237          * Linux 2.6 driver see the correct flags before the next SCSI phase
1238          * transition.
1239          *
1240          * s->rregs[ESP_RSEQ] = SEQ_0;
1241          */
1242         break;
1243     case ESP_TCHI:
1244         /* Return the unique id if the value has never been written */
1245         if (!s->tchi_written) {
1246             val = s->chip_id;
1247         } else {
1248             val = s->rregs[saddr];
1249         }
1250         break;
1251      case ESP_RFLAGS:
1252         /* Bottom 5 bits indicate number of bytes in FIFO */
1253         val = fifo8_num_used(&s->fifo);
1254         break;
1255     default:
1256         val = s->rregs[saddr];
1257         break;
1258     }
1259 
1260     trace_esp_mem_readb(saddr, val);
1261     return val;
1262 }
1263 
/*
 * Guest write to an ESP register.  Valid writes are also latched into
 * wregs[] at the end; invalid registers return early and latch nothing.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Writing any transfer count register clears the TC status bit */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(s, val);
        }
        /* A FIFO write may allow a pending non-DMA transfer to progress */
        esp_do_nodma(s);
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Configuration registers read back the written value */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        /* Unknown register: skip the wregs[] update below */
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
1300 
1301 static bool esp_mem_accepts(void *opaque, hwaddr addr,
1302                             unsigned size, bool is_write,
1303                             MemTxAttrs attrs)
1304 {
1305     return (size == 1) || (is_write && size == 4);
1306 }
1307 
1308 static bool esp_is_before_version_5(void *opaque, int version_id)
1309 {
1310     ESPState *s = ESP(opaque);
1311 
1312     version_id = MIN(version_id, s->mig_version_id);
1313     return version_id < 5;
1314 }
1315 
1316 static bool esp_is_version_5(void *opaque, int version_id)
1317 {
1318     ESPState *s = ESP(opaque);
1319 
1320     version_id = MIN(version_id, s->mig_version_id);
1321     return version_id >= 5;
1322 }
1323 
1324 static bool esp_is_version_6(void *opaque, int version_id)
1325 {
1326     ESPState *s = ESP(opaque);
1327 
1328     version_id = MIN(version_id, s->mig_version_id);
1329     return version_id >= 6;
1330 }
1331 
1332 static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
1333 {
1334     ESPState *s = ESP(opaque);
1335 
1336     version_id = MIN(version_id, s->mig_version_id);
1337     return version_id >= 5 && version_id <= 6;
1338 }
1339 
/*
 * pre_save hook for devices embedding an ESP core: record the current
 * vmstate version in the embedded "esp" child so the version test
 * functions above can use it during a subsequent load.
 */
int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
1348 
/*
 * post_load hook: convert state loaded from a pre-version-5 stream
 * (mig_* fields) into the current representation (TC register and FIFOs).
 */
static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    /* Honour the lower of the inbound and recorded stream versions */
    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        /* Older streams stored the transfer counter in mig_dma_left */
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        /*
         * NOTE(review): bytes are copied starting at index 0 rather than
         * at mig_ti_rptr — presumably old streams always had rptr == 0;
         * confirm against the pre-version-5 device code.
         */
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    /* Subsequent saves from this state use the current version */
    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
1374 
/*
 * Migration state for the ESP core.  Fields guarded by the esp_is_*
 * version tests exist only to read/write particular stream versions;
 * the mig_* fields are legacy representations converted in
 * esp_post_load().
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 7,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        /* Legacy ti_buf representation (streams < 5) */
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /* cmdbuf was 16 bytes up to stream version 3, full size from 4 */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        /* Current representation (streams >= 5) */
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_END_OF_LIST()
    },
};
1412 
1413 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
1414                                  uint64_t val, unsigned int size)
1415 {
1416     SysBusESPState *sysbus = opaque;
1417     ESPState *s = ESP(&sysbus->esp);
1418     uint32_t saddr;
1419 
1420     saddr = addr >> sysbus->it_shift;
1421     esp_reg_write(s, saddr, val);
1422 }
1423 
1424 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
1425                                     unsigned int size)
1426 {
1427     SysBusESPState *sysbus = opaque;
1428     ESPState *s = ESP(&sysbus->esp);
1429     uint32_t saddr;
1430 
1431     saddr = addr >> sysbus->it_shift;
1432     return esp_reg_read(s, saddr);
1433 }
1434 
/* Register window; esp_mem_accepts restricts the permitted access sizes */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1441 
1442 static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
1443                                   uint64_t val, unsigned int size)
1444 {
1445     SysBusESPState *sysbus = opaque;
1446     ESPState *s = ESP(&sysbus->esp);
1447 
1448     trace_esp_pdma_write(size);
1449 
1450     switch (size) {
1451     case 1:
1452         esp_pdma_write(s, val);
1453         break;
1454     case 2:
1455         esp_pdma_write(s, val >> 8);
1456         esp_pdma_write(s, val);
1457         break;
1458     }
1459     esp_do_dma(s);
1460 }
1461 
1462 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
1463                                      unsigned int size)
1464 {
1465     SysBusESPState *sysbus = opaque;
1466     ESPState *s = ESP(&sysbus->esp);
1467     uint64_t val = 0;
1468 
1469     trace_esp_pdma_read(size);
1470 
1471     switch (size) {
1472     case 1:
1473         val = esp_pdma_read(s);
1474         break;
1475     case 2:
1476         val = esp_pdma_read(s);
1477         val = (val << 8) | esp_pdma_read(s);
1478         break;
1479     }
1480     esp_do_dma(s);
1481     return val;
1482 }
1483 
/*
 * Migration hook: re-attach a migrated SCSI request as the current
 * request and return the hba_private pointer for it.
 */
static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}
1492 
/* Pseudo-DMA port; wider guest accesses are split into 1/2-byte ops */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1502 
/* SCSI bus callbacks wiring the bus layer to the ESP core */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1513 
1514 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1515 {
1516     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1517     ESPState *s = ESP(&sysbus->esp);
1518 
1519     switch (irq) {
1520     case 0:
1521         parent_esp_reset(s, irq, level);
1522         break;
1523     case 1:
1524         esp_dma_enable(s, irq, level);
1525         break;
1526     }
1527 }
1528 
/*
 * Realize the sysbus wrapper: realize the embedded ESP core, export its
 * IRQ/DRQ lines, and map the register and pseudo-DMA MMIO regions.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->drq_irq);
    /* The board must have configured the register spacing before realize */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    /* MMIO region 0: chip registers spaced 1 << it_shift bytes apart */
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    /* MMIO region 1: pseudo-DMA data port */
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    /* GPIO 0: reset line, GPIO 1: DMA enable (see sysbus_esp_gpio_demux) */
    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}
1555 
1556 static void sysbus_esp_hard_reset(DeviceState *dev)
1557 {
1558     SysBusESPState *sysbus = SYSBUS_ESP(dev);
1559     ESPState *s = ESP(&sysbus->esp);
1560 
1561     esp_hard_reset(s);
1562 }
1563 
/* Instance init: embed the ESP core as a child object named "esp" */
static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}
1570 
/* Migration state for the sysbus wrapper, embedding vmstate_esp */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        /* Stream version recorded by esp_pre_save, used by the version tests */
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1582 
/* Class init for the sysbus ESP device: realize, reset and migration hooks */
static void sysbus_esp_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    device_class_set_legacy_reset(dc, sysbus_esp_hard_reset);
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1592 
1593 static void esp_finalize(Object *obj)
1594 {
1595     ESPState *s = ESP(obj);
1596 
1597     fifo8_destroy(&s->fifo);
1598     fifo8_destroy(&s->cmdfifo);
1599 }
1600 
1601 static void esp_init(Object *obj)
1602 {
1603     ESPState *s = ESP(obj);
1604 
1605     fifo8_create(&s->fifo, ESP_FIFO_SZ);
1606     fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
1607 }
1608 
/* Class init for the bare ESP core type */
static void esp_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1617 
/* QOM type registration: the sysbus wrapper and the embedded ESP core */
static const TypeInfo esp_info_types[] = {
    {
        .name          = TYPE_SYSBUS_ESP,
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init    = sysbus_esp_class_init,
    },
    {
        .name = TYPE_ESP,
        .parent = TYPE_DEVICE,
        .instance_init = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size = sizeof(ESPState),
        .class_init = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)
1637