xref: /openbmc/qemu/hw/scsi/esp.c (revision 1fa3812ee884dba8dbbd9d2f121b10c67469cae3)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  * Copyright (c) 2023 Mark Cave-Ayland
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a copy
9  * of this software and associated documentation files (the "Software"), to deal
10  * in the Software without restriction, including without limitation the rights
11  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12  * copies of the Software, and to permit persons to whom the Software is
13  * furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included in
16  * all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24  * THE SOFTWARE.
25  */
26 
27 #include "qemu/osdep.h"
28 #include "hw/sysbus.h"
29 #include "migration/vmstate.h"
30 #include "hw/irq.h"
31 #include "hw/scsi/esp.h"
32 #include "trace.h"
33 #include "qemu/log.h"
34 #include "qemu/module.h"
35 
36 /*
37  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
38  * also produced as NCR89C100. See
39  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
40  * and
41  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
42  *
43  * On Macintosh Quadra it is a NCR53C96.
44  */
45 
46 static void esp_raise_irq(ESPState *s)
47 {
48     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
49         s->rregs[ESP_RSTAT] |= STAT_INT;
50         qemu_irq_raise(s->irq);
51         trace_esp_raise_irq();
52     }
53 }
54 
55 static void esp_lower_irq(ESPState *s)
56 {
57     if (s->rregs[ESP_RSTAT] & STAT_INT) {
58         s->rregs[ESP_RSTAT] &= ~STAT_INT;
59         qemu_irq_lower(s->irq);
60         trace_esp_lower_irq();
61     }
62 }
63 
64 static void esp_raise_drq(ESPState *s)
65 {
66     if (!(s->drq_state)) {
67         qemu_irq_raise(s->drq_irq);
68         trace_esp_raise_drq();
69         s->drq_state = true;
70     }
71 }
72 
73 static void esp_lower_drq(ESPState *s)
74 {
75     if (s->drq_state) {
76         qemu_irq_lower(s->drq_irq);
77         trace_esp_lower_drq();
78         s->drq_state = false;
79     }
80 }
81 
/* Human-readable names for the 3-bit SCSI bus phase, indexed by phase value */
static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};
86 
87 static void esp_set_phase(ESPState *s, uint8_t phase)
88 {
89     s->rregs[ESP_RSTAT] &= ~7;
90     s->rregs[ESP_RSTAT] |= phase;
91 
92     trace_esp_set_phase(esp_phase_names[phase]);
93 }
94 
95 static uint8_t esp_get_phase(ESPState *s)
96 {
97     return s->rregs[ESP_RSTAT] & 7;
98 }
99 
100 void esp_dma_enable(ESPState *s, int irq, int level)
101 {
102     if (level) {
103         s->dma_enabled = 1;
104         trace_esp_dma_enable();
105         if (s->dma_cb) {
106             s->dma_cb(s);
107             s->dma_cb = NULL;
108         }
109     } else {
110         trace_esp_dma_disable();
111         s->dma_enabled = 0;
112     }
113 }
114 
/*
 * SCSI layer callback: a request submitted by this HBA has been cancelled.
 * Drop our reference and clear the in-flight transfer state, but only if
 * the cancelled request is the one currently being tracked.
 */
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}
126 
/*
 * Recompute the DRQ (DMA request) line from the current bus phase and FIFO
 * occupancy. DRQ is only asserted for DMA transfers; for non-DMA transfers
 * it is always lowered. The "< 2" thresholds presumably leave headroom for
 * 16-bit PDMA accesses -- TODO confirm against the drq_irq consumers.
 */
static void esp_update_drq(ESPState *s)
{
    bool to_device;

    switch (esp_get_phase(s)) {
    /* Phases where the initiator sends bytes towards the target */
    case STAT_MO:
    case STAT_CD:
    case STAT_DO:
        to_device = true;
        break;

    /* Phases where the target sends bytes towards the initiator */
    case STAT_DI:
    case STAT_ST:
    case STAT_MI:
        to_device = false;
        break;

    default:
        /* Reserved phases: leave DRQ untouched */
        return;
    }

    if (s->dma) {
        /* DMA request so update DRQ according to transfer direction */
        if (to_device) {
            if (fifo8_num_free(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        } else {
            if (fifo8_num_used(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        }
    } else {
        /* Not a DMA request */
        esp_lower_drq(s);
    }
}
168 
169 static void esp_fifo_push(ESPState *s, uint8_t val)
170 {
171     if (fifo8_num_used(&s->fifo) == s->fifo.capacity) {
172         trace_esp_error_fifo_overrun();
173     } else {
174         fifo8_push(&s->fifo, val);
175     }
176 
177     esp_update_drq(s);
178 }
179 
/*
 * Push a buffer into the main FIFO and re-evaluate DRQ. Callers must clamp
 * len to fifo8_num_free() beforehand: fifo8_push_all() asserts on overflow.
 */
static void esp_fifo_push_buf(ESPState *s, uint8_t *buf, int len)
{
    fifo8_push_all(&s->fifo, buf, len);
    esp_update_drq(s);
}
185 
186 static uint8_t esp_fifo_pop(ESPState *s)
187 {
188     uint8_t val;
189 
190     if (fifo8_is_empty(&s->fifo)) {
191         val = 0;
192     } else {
193         val = fifo8_pop(&s->fifo);
194     }
195 
196     esp_update_drq(s);
197     return val;
198 }
199 
/*
 * Pop up to maxlen bytes from the main FIFO into dest, re-evaluate DRQ,
 * and return the number of bytes actually transferred.
 */
static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
{
    uint32_t len = fifo8_pop_buf(&s->fifo, dest, maxlen);

    esp_update_drq(s);
    return len;
}
207 
208 static uint32_t esp_get_tc(ESPState *s)
209 {
210     uint32_t dmalen;
211 
212     dmalen = s->rregs[ESP_TCLO];
213     dmalen |= s->rregs[ESP_TCMID] << 8;
214     dmalen |= s->rregs[ESP_TCHI] << 16;
215 
216     return dmalen;
217 }
218 
219 static void esp_set_tc(ESPState *s, uint32_t dmalen)
220 {
221     uint32_t old_tc = esp_get_tc(s);
222 
223     s->rregs[ESP_TCLO] = dmalen;
224     s->rregs[ESP_TCMID] = dmalen >> 8;
225     s->rregs[ESP_TCHI] = dmalen >> 16;
226 
227     if (old_tc && dmalen == 0) {
228         s->rregs[ESP_RSTAT] |= STAT_TC;
229     }
230 }
231 
232 static uint32_t esp_get_stc(ESPState *s)
233 {
234     uint32_t dmalen;
235 
236     dmalen = s->wregs[ESP_TCLO];
237     dmalen |= s->wregs[ESP_TCMID] << 8;
238     dmalen |= s->wregs[ESP_TCHI] << 16;
239 
240     return dmalen;
241 }
242 
/* Programmed DMA read: the host consumes one byte from the FIFO */
static uint8_t esp_pdma_read(ESPState *s)
{
    return esp_fifo_pop(s);
}
247 
248 static void esp_pdma_write(ESPState *s, uint8_t val)
249 {
250     uint32_t dmalen = esp_get_tc(s);
251 
252     esp_fifo_push(s, val);
253 
254     if (dmalen && s->drq_state) {
255         dmalen--;
256         esp_set_tc(s, dmalen);
257     }
258 }
259 
/*
 * Begin selection of the target addressed by the bus ID register.
 * Returns 0 on success, or -1 (with a disconnect interrupt raised)
 * if no device exists at the selected target ID.
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in esp_transfer_data() or esp_command_complete()
     */
    return 0;
}
289 
290 static void esp_do_dma(ESPState *s);
291 static void esp_do_nodma(ESPState *s);
292 
/*
 * Execute the COMMAND phase: drain the accumulated CDB bytes from cmdfifo,
 * look up the addressed LUN and enqueue a new SCSI request for it. If the
 * command implies a data transfer, switch to the appropriate DATA phase
 * and kick the SCSI layer; command completion is signalled later from
 * esp_transfer_data()/esp_command_complete().
 */
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    /* datalen > 0: device-to-initiator, < 0: initiator-to-device, 0: none */
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    s->data_ready = false;
    if (datalen != 0) {
        /*
         * Switch to DATA phase but wait until initial data xfer is
         * complete before raising the command completion interrupt
         */
        if (datalen > 0) {
            esp_set_phase(s, STAT_DI);
        } else {
            esp_set_phase(s, STAT_DO);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
336 
/*
 * Consume the MESSAGE OUT bytes that precede the CDB in cmdfifo: the
 * first byte is the IDENTIFY message carrying the LUN in its low 3 bits;
 * any remaining pre-CDB bytes (extended messages) are dropped for now.
 * Leaves cmdfifo_cdb_offset == 0 so the CDB starts at the fifo head.
 */
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = fifo8_is_empty(&s->cmdfifo) ? 0 :
                          fifo8_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        fifo8_drop(&s->cmdfifo, len);
        s->cmdfifo_cdb_offset = 0;
    }
}
355 
/*
 * Process a complete command in cmdfifo: strip the leading message
 * phase bytes, then execute the CDB that remains.
 */
static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
362 
363 static void handle_satn(ESPState *s)
364 {
365     if (s->dma && !s->dma_enabled) {
366         s->dma_cb = handle_satn;
367         return;
368     }
369 
370     if (esp_select(s) < 0) {
371         return;
372     }
373 
374     esp_set_phase(s, STAT_MO);
375 
376     if (s->dma) {
377         esp_do_dma(s);
378     } else {
379         esp_do_nodma(s);
380     }
381 }
382 
/*
 * SELECT without ATN: no message phase is expected, so go straight to
 * COMMAND phase after a successful selection. Deferred via dma_cb if DMA
 * was requested but is not yet enabled.
 */
static void handle_s_without_atn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_CD);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}
403 
/*
 * SELECT with ATN and stop: like handle_satn() but the sequence stops
 * after the first message byte (see the CMD_SELATNS cases in the
 * esp_do_dma()/esp_do_nodma() MESSAGE OUT handling). Deferred via dma_cb
 * if DMA was requested but is not yet enabled.
 */
static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}
424 
425 static void handle_pad(ESPState *s)
426 {
427     if (s->dma) {
428         esp_do_dma(s);
429     } else {
430         esp_do_nodma(s);
431     }
432 }
433 
434 static void write_response(ESPState *s)
435 {
436     trace_esp_write_response(s->status);
437 
438     if (s->dma) {
439         esp_do_dma(s);
440     } else {
441         esp_do_nodma(s);
442     }
443 }
444 
/*
 * Return true once cmdfifo holds a complete CDB after the message bytes
 * at cmdfifo_cdb_offset: the CDB length is derived from its first byte
 * via scsi_cdb_length(). Returns false while bytes are still outstanding
 * or if the opcode is unrecognised.
 */
static bool esp_cdb_ready(ESPState *s)
{
    int len = fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset;
    const uint8_t *pbuf;
    uint32_t n;
    int cdblen;

    if (len <= 0) {
        return false;
    }

    pbuf = fifo8_peek_bufptr(&s->cmdfifo, len, &n);
    if (n < len) {
        /*
         * In normal use the cmdfifo should never wrap, but include this check
         * to prevent a malicious guest from reading past the end of the
         * cmdfifo data buffer below
         */
        return false;
    }

    cdblen = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);

    return cdblen < 0 ? false : (len >= cdblen);
}
470 
471 static void esp_dma_ti_check(ESPState *s)
472 {
473     if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) {
474         s->rregs[ESP_RINTR] |= INTR_BS;
475         esp_raise_irq(s);
476     }
477 }
478 
/*
 * Main DMA transfer engine: advance the transfer dictated by the current
 * bus phase, the last command written to ESP_CMD and the transfer counter
 * (TC). Data moves either through the dma_memory_read/write callbacks
 * (true DMA) or through the FIFO (PDMA). May recurse to process data that
 * belongs to the next phase, and may complete/continue the active SCSI
 * request. NOTE(review): buf[] is sized ESP_CMDFIFO_SZ, which bounds the
 * per-call copy in the message/command phases below.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        /* MESSAGE OUT: accumulate message bytes into cmdfifo */
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        /* COMMAND: accumulate CDB bytes, execute once TC reaches zero */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;

    case STAT_DO:
        /* DATA OUT: move bytes from guest memory or FIFO to the device */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        /* DATA IN: move bytes from the device to guest memory or FIFO */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                s->dma_memory_write(s->dma_opaque, s->async_buf, len);
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                esp_fifo_push_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        /* STATUS: transfer the single status byte, then MESSAGE IN */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        /* MESSAGE IN: transfer the COMMAND COMPLETE (0x00) message byte */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}
728 
/*
 * Non-DMA TI in the DATA OUT phase: drain whatever the guest has written
 * into the FIFO towards the device buffer. Continues the SCSI request
 * when the device buffer fills, otherwise raises bus service to ask the
 * guest for more data.
 */
static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(s, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
755 
/*
 * Main non-DMA (programmed I/O) transfer engine: advance the transfer
 * dictated by the current bus phase and the last command in ESP_CMD,
 * moving bytes between the FIFO, cmdfifo and the active SCSI request.
 * May recurse to process data belonging to the next phase.
 */
static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        /* MESSAGE OUT: gather message bytes from the FIFO into cmdfifo */
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf,
                                   MIN(fifo8_num_used(&s->fifo), 1));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        /* COMMAND: gather CDB bytes; execute once the CDB is complete */
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;

        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contain entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        /* DATA IN: feed device data to the guest one FIFO byte at a time */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0) {
            /* Defer until data is available.  */
            return;
        }
        if (fifo8_is_empty(&s->fifo)) {
            esp_fifo_push(s, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        /* STATUS: push the status byte, then move to MESSAGE IN */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        /* MESSAGE IN: push COMMAND COMPLETE (0x00) and finish the command */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}
931 
/*
 * SCSI layer callback: the current request has finished. Record the
 * completion status, raise the interrupts appropriate for the command
 * that was in flight, switch the bus to STATUS phase and release the
 * request.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */
    s->ti_size = 0;

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
991 
/*
 * SCSI layer callback: a data buffer of len bytes is ready for transfer.
 * Latch the buffer, raise any interrupt that was deferred until the first
 * chunk of data became available, then resume the in-progress TI command
 * in the correct (DMA or non-DMA) mode.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
             s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
             s->rregs[ESP_RSEQ] = SEQ_CD;
             esp_raise_irq(s);
             break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
             s->rregs[ESP_RINTR] |= INTR_BS;
             s->rregs[ESP_RSEQ] = SEQ_MO;
             esp_raise_irq(s);
             break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * Bus service interrupt raised because of initial change to
             * DATA phase
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */

    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}
1059 
1060 static void handle_ti(ESPState *s)
1061 {
1062     uint32_t dmalen;
1063 
1064     if (s->dma && !s->dma_enabled) {
1065         s->dma_cb = handle_ti;
1066         return;
1067     }
1068 
1069     if (s->dma) {
1070         dmalen = esp_get_tc(s);
1071         trace_esp_handle_ti(dmalen);
1072         esp_do_dma(s);
1073     } else {
1074         trace_esp_handle_ti(s->ti_size);
1075         esp_do_nodma(s);
1076 
1077         if (esp_get_phase(s) == STAT_DO) {
1078             esp_nodma_ti_dataout(s);
1079         }
1080     }
1081 }
1082 
1083 void esp_hard_reset(ESPState *s)
1084 {
1085     memset(s->rregs, 0, ESP_REGS);
1086     memset(s->wregs, 0, ESP_REGS);
1087     s->tchi_written = 0;
1088     s->ti_size = 0;
1089     s->async_len = 0;
1090     fifo8_reset(&s->fifo);
1091     fifo8_reset(&s->cmdfifo);
1092     s->dma = 0;
1093     s->dma_cb = NULL;
1094 
1095     s->rregs[ESP_CFG1] = 7;
1096 }
1097 
/*
 * Soft reset (CMD_RESET): deassert both outgoing interrupt lines, then
 * reset the chip state exactly as a hard reset would.
 */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->drq_irq);
    esp_hard_reset(s);
}
1104 
/* SCSI bus reset: cold-reset every device attached to the ESP's bus */
static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}
1109 
1110 static void parent_esp_reset(ESPState *s, int irq, int level)
1111 {
1112     if (level) {
1113         esp_soft_reset(s);
1114     }
1115 }
1116 
/*
 * Decode and execute the command most recently written to ESP_CMD.
 *
 * If the CMD_DMA bit is set, the transfer counter (TC) is first reloaded
 * from the start transfer count (STC) registers; a programmed STC of zero
 * selects the maximum transfer length of 0x10000 bytes.
 */
static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter.  */
        if (esp_get_stc(s) == 0) {
            /* STC == 0 means the maximum 64K byte count */
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        /* Flush FIFO: discard any bytes queued in the data FIFO */
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        /* Reset the chip itself (registers, FIFOs, IRQ lines) */
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        /*
         * Reset the SCSI bus; unless reset-interrupt reporting is
         * disabled via CFG1, also raise the reset interrupt
         */
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        /* Initiator command complete steps: send status + message byte */
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        break;
    case CMD_MSGACC:
        /* Message accepted: signal disconnect, clear sequence and flags */
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        /* Set ATN: no modelled side effect beyond tracing */
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        /* Reset ATN: no modelled side effect beyond tracing */
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        /* Enable selection/reselection: clear pending interrupt status */
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}
1203 
/*
 * Guest read of an ESP register.
 *
 * Several registers have read side effects: reading the FIFO pops a byte,
 * and reading RINTR clears the interrupt/sequence state.  All other
 * registers simply return the latched rregs value.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        /* Reading the FIFO register consumes one byte from the FIFO */
        s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        esp_lower_irq(s);
        s->rregs[ESP_RSTAT] &= STAT_TC | 7;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
     case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
1252 
/*
 * Guest write to an ESP register.
 *
 * Writes are mirrored into the wregs[] shadow bank at the end of the
 * function (except for invalid addresses, which return early).  Writing
 * any transfer count register clears the TC status bit; writing the FIFO
 * pushes a byte (dropped if full) and kicks the non-DMA engine; writing
 * CMD latches and immediately executes the command.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        /* Remember TCHI has been written so reads no longer return chip_id */
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(s, val);
        }
        esp_do_nodma(s);
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        /* Write-only setup registers: only the wregs shadow is updated */
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Config/reserved registers are readable back via rregs */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
1289 
1290 static bool esp_mem_accepts(void *opaque, hwaddr addr,
1291                             unsigned size, bool is_write,
1292                             MemTxAttrs attrs)
1293 {
1294     return (size == 1) || (is_write && size == 4);
1295 }
1296 
1297 static bool esp_is_before_version_5(void *opaque, int version_id)
1298 {
1299     ESPState *s = ESP(opaque);
1300 
1301     version_id = MIN(version_id, s->mig_version_id);
1302     return version_id < 5;
1303 }
1304 
1305 static bool esp_is_version_5(void *opaque, int version_id)
1306 {
1307     ESPState *s = ESP(opaque);
1308 
1309     version_id = MIN(version_id, s->mig_version_id);
1310     return version_id >= 5;
1311 }
1312 
1313 static bool esp_is_version_6(void *opaque, int version_id)
1314 {
1315     ESPState *s = ESP(opaque);
1316 
1317     version_id = MIN(version_id, s->mig_version_id);
1318     return version_id >= 6;
1319 }
1320 
1321 static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
1322 {
1323     ESPState *s = ESP(opaque);
1324 
1325     version_id = MIN(version_id, s->mig_version_id);
1326     return version_id >= 5 && version_id <= 6;
1327 }
1328 
/*
 * pre_save hook shared by the parent (sysbus/PCI) devices: resolve the
 * embedded "esp" child object and latch the current vmstate version so
 * that the version-dependent field tests see a consistent value during
 * save.  Always succeeds.
 */
int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
1337 
/*
 * post_load hook: convert state migrated from pre-version-5 streams into
 * the current representation (TC registers, fifo and cmdfifo), then latch
 * the running vmstate version.  Returns 0 (success).
 */
static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    /* The effective version is capped by what the source device reported */
    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        /* Old streams carried the DMA remainder separately */
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
1363 
/*
 * Migration description for the ESP core state.  Fields guarded by the
 * esp_is_* tests exist only in particular stream versions; the mig_*
 * fields are legacy representations converted in esp_post_load().
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 7,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        /* Legacy (< v5) transfer buffer, replaced by the fifo below */
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /* Legacy cmdbuf: first 16 bytes since v0, remainder since v4 */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        /* Current (>= v5) representation */
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_END_OF_LIST()
    },
};
1401 
1402 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
1403                                  uint64_t val, unsigned int size)
1404 {
1405     SysBusESPState *sysbus = opaque;
1406     ESPState *s = ESP(&sysbus->esp);
1407     uint32_t saddr;
1408 
1409     saddr = addr >> sysbus->it_shift;
1410     esp_reg_write(s, saddr, val);
1411 }
1412 
1413 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
1414                                     unsigned int size)
1415 {
1416     SysBusESPState *sysbus = opaque;
1417     ESPState *s = ESP(&sysbus->esp);
1418     uint32_t saddr;
1419 
1420     saddr = addr >> sysbus->it_shift;
1421     return esp_reg_read(s, saddr);
1422 }
1423 
/* Memory region ops for the ESP register bank; access sizes are
 * restricted by esp_mem_accepts() (bytes always, 32-bit writes only). */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1430 
1431 static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
1432                                   uint64_t val, unsigned int size)
1433 {
1434     SysBusESPState *sysbus = opaque;
1435     ESPState *s = ESP(&sysbus->esp);
1436 
1437     trace_esp_pdma_write(size);
1438 
1439     switch (size) {
1440     case 1:
1441         esp_pdma_write(s, val);
1442         break;
1443     case 2:
1444         esp_pdma_write(s, val >> 8);
1445         esp_pdma_write(s, val);
1446         break;
1447     }
1448     esp_do_dma(s);
1449 }
1450 
1451 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
1452                                      unsigned int size)
1453 {
1454     SysBusESPState *sysbus = opaque;
1455     ESPState *s = ESP(&sysbus->esp);
1456     uint64_t val = 0;
1457 
1458     trace_esp_pdma_read(size);
1459 
1460     switch (size) {
1461     case 1:
1462         val = esp_pdma_read(s);
1463         break;
1464     case 2:
1465         val = esp_pdma_read(s);
1466         val = (val << 8) | esp_pdma_read(s);
1467         break;
1468     }
1469     esp_do_dma(s);
1470     return val;
1471 }
1472 
/*
 * SCSIBusInfo.load_request hook: re-attach an in-flight SCSI request
 * after migration.  Takes a reference on the request, records it as the
 * current request and returns the ESPState.
 * NOTE(review): the return value is presumably used by the SCSI layer as
 * the request's hba_private pointer — confirm against scsi_req_new().
 */
static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}
1481 
/* Memory region ops for the PDMA window: guests may issue 1-4 byte
 * accesses, but the implementation handles at most 2 bytes at a time
 * (larger accesses are split by the memory core). */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1491 
/* SCSI bus callbacks wiring the generic SCSI layer to the ESP core */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1502 
1503 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1504 {
1505     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1506     ESPState *s = ESP(&sysbus->esp);
1507 
1508     switch (irq) {
1509     case 0:
1510         parent_esp_reset(s, irq, level);
1511         break;
1512     case 1:
1513         esp_dma_enable(s, irq, level);
1514         break;
1515     }
1516 }
1517 
/*
 * Realize the sysbus wrapper: realize the embedded ESP core, export the
 * IRQ/DRQ lines, map the register and PDMA MMIO regions, register the
 * two inbound GPIO lines and initialise the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    /* Realize the child ESP core first; propagate any error */
    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->drq_irq);
    /* it_shift must have been set by the board before realize */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}
1544 
/* Legacy device reset handler: hard-reset the embedded ESP core */
static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}
1552 
/* Instance init: create the embedded "esp" core as a QOM child */
static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}
1559 
/*
 * Migration description for the sysbus wrapper: saves the ESP vmstate
 * version (since v2) before the embedded core state, so the esp_is_*
 * field tests can honour the source's version (see esp_pre_save()).
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1571 
/* Class init for the sysbus ESP device: realize/reset/vmstate hooks */
static void sysbus_esp_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    device_class_set_legacy_reset(dc, sysbus_esp_hard_reset);
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1581 
/* Instance finalize: release the FIFO storage created in esp_init() */
static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}
1589 
/* Instance init: allocate the data and command FIFOs */
static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}
1597 
/* Class init for the bare ESP core device */
static void esp_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1606 
/* QOM type registrations: the sysbus wrapper and the embedded ESP core */
static const TypeInfo esp_info_types[] = {
    {
        .name          = TYPE_SYSBUS_ESP,
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init    = sysbus_esp_class_init,
    },
    {
        .name = TYPE_ESP,
        .parent = TYPE_DEVICE,
        .instance_init = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size = sizeof(ESPState),
        .class_init = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)
1626