xref: /openbmc/qemu/hw/scsi/esp.c (revision e70aa5dc299c49c59bd91c80e771db56996c2188)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  * Copyright (c) 2023 Mark Cave-Ayland
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a copy
9  * of this software and associated documentation files (the "Software"), to deal
10  * in the Software without restriction, including without limitation the rights
11  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12  * copies of the Software, and to permit persons to whom the Software is
13  * furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included in
16  * all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24  * THE SOFTWARE.
25  */
26 
27 #include "qemu/osdep.h"
28 #include "hw/sysbus.h"
29 #include "migration/vmstate.h"
30 #include "hw/irq.h"
31 #include "hw/scsi/esp.h"
32 #include "trace.h"
33 #include "qemu/log.h"
34 #include "qemu/module.h"
35 
36 /*
37  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
38  * also produced as NCR89C100. See
39  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
40  * and
41  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
42  *
43  * On Macintosh Quadra it is a NCR53C96.
44  */
45 
46 static void esp_raise_irq(ESPState *s)
47 {
48     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
49         s->rregs[ESP_RSTAT] |= STAT_INT;
50         qemu_irq_raise(s->irq);
51         trace_esp_raise_irq();
52     }
53 }
54 
55 static void esp_lower_irq(ESPState *s)
56 {
57     if (s->rregs[ESP_RSTAT] & STAT_INT) {
58         s->rregs[ESP_RSTAT] &= ~STAT_INT;
59         qemu_irq_lower(s->irq);
60         trace_esp_lower_irq();
61     }
62 }
63 
64 static void esp_raise_drq(ESPState *s)
65 {
66     if (!(s->drq_state)) {
67         qemu_irq_raise(s->drq_irq);
68         trace_esp_raise_drq();
69         s->drq_state = true;
70     }
71 }
72 
73 static void esp_lower_drq(ESPState *s)
74 {
75     if (s->drq_state) {
76         qemu_irq_lower(s->drq_irq);
77         trace_esp_lower_drq();
78         s->drq_state = false;
79     }
80 }
81 
/*
 * Human-readable names for the 3-bit SCSI bus phase encoding, indexed by
 * the phase value stored in the low bits of ESP_RSTAT. Declared with a
 * const pointer array so the whole table lives in read-only storage.
 */
static const char *const esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};
86 
87 static void esp_set_phase(ESPState *s, uint8_t phase)
88 {
89     s->rregs[ESP_RSTAT] &= ~7;
90     s->rregs[ESP_RSTAT] |= phase;
91 
92     trace_esp_set_phase(esp_phase_names[phase]);
93 }
94 
95 static uint8_t esp_get_phase(ESPState *s)
96 {
97     return s->rregs[ESP_RSTAT] & 7;
98 }
99 
100 void esp_dma_enable(ESPState *s, int irq, int level)
101 {
102     if (level) {
103         s->dma_enabled = 1;
104         trace_esp_dma_enable();
105         if (s->dma_cb) {
106             s->dma_cb(s);
107             s->dma_cb = NULL;
108         }
109     } else {
110         trace_esp_dma_disable();
111         s->dma_enabled = 0;
112     }
113 }
114 
115 void esp_request_cancelled(SCSIRequest *req)
116 {
117     ESPState *s = req->hba_private;
118 
119     if (req == s->current_req) {
120         scsi_req_unref(s->current_req);
121         s->current_req = NULL;
122         s->current_dev = NULL;
123         s->async_len = 0;
124     }
125 }
126 
127 static void esp_update_drq(ESPState *s)
128 {
129     bool to_device;
130 
131     switch (esp_get_phase(s)) {
132     case STAT_MO:
133     case STAT_CD:
134     case STAT_DO:
135         to_device = true;
136         break;
137 
138     case STAT_DI:
139     case STAT_ST:
140     case STAT_MI:
141         to_device = false;
142         break;
143 
144     default:
145         return;
146     }
147 
148     if (s->dma) {
149         /* DMA request so update DRQ according to transfer direction */
150         if (to_device) {
151             if (fifo8_num_free(&s->fifo) < 2) {
152                 esp_lower_drq(s);
153             } else {
154                 esp_raise_drq(s);
155             }
156         } else {
157             if (fifo8_num_used(&s->fifo) < 2) {
158                 esp_lower_drq(s);
159             } else {
160                 esp_raise_drq(s);
161             }
162         }
163     } else {
164         /* Not a DMA request */
165         esp_lower_drq(s);
166     }
167 }
168 
169 static void esp_fifo_push(ESPState *s, uint8_t val)
170 {
171     if (fifo8_num_used(&s->fifo) == s->fifo.capacity) {
172         trace_esp_error_fifo_overrun();
173     } else {
174         fifo8_push(&s->fifo, val);
175     }
176 
177     esp_update_drq(s);
178 }
179 
180 static void esp_fifo_push_buf(ESPState *s, uint8_t *buf, int len)
181 {
182     fifo8_push_all(&s->fifo, buf, len);
183     esp_update_drq(s);
184 }
185 
186 static uint8_t esp_fifo_pop(ESPState *s)
187 {
188     uint8_t val;
189 
190     if (fifo8_is_empty(&s->fifo)) {
191         val = 0;
192     } else {
193         val = fifo8_pop(&s->fifo);
194     }
195 
196     esp_update_drq(s);
197     return val;
198 }
199 
200 static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
201 {
202     uint32_t len = fifo8_pop_buf(&s->fifo, dest, maxlen);
203 
204     esp_update_drq(s);
205     return len;
206 }
207 
208 static uint32_t esp_get_tc(ESPState *s)
209 {
210     uint32_t dmalen;
211 
212     dmalen = s->rregs[ESP_TCLO];
213     dmalen |= s->rregs[ESP_TCMID] << 8;
214     dmalen |= s->rregs[ESP_TCHI] << 16;
215 
216     return dmalen;
217 }
218 
219 static void esp_set_tc(ESPState *s, uint32_t dmalen)
220 {
221     uint32_t old_tc = esp_get_tc(s);
222 
223     s->rregs[ESP_TCLO] = dmalen;
224     s->rregs[ESP_TCMID] = dmalen >> 8;
225     s->rregs[ESP_TCHI] = dmalen >> 16;
226 
227     if (old_tc && dmalen == 0) {
228         s->rregs[ESP_RSTAT] |= STAT_TC;
229     }
230 }
231 
232 static uint32_t esp_get_stc(ESPState *s)
233 {
234     uint32_t dmalen;
235 
236     dmalen = s->wregs[ESP_TCLO];
237     dmalen |= s->wregs[ESP_TCMID] << 8;
238     dmalen |= s->wregs[ESP_TCHI] << 16;
239 
240     return dmalen;
241 }
242 
/* Service a PDMA read: the next byte comes straight from the FIFO */
static uint8_t esp_pdma_read(ESPState *s)
{
    return esp_fifo_pop(s);
}
247 
/*
 * Service a PDMA write: push the byte into the FIFO and, if DRQ was
 * asserted, count it against the transfer counter.
 *
 * Note that the TC value is sampled *before* the push, and drq_state is
 * checked *after* it (esp_fifo_push updates DRQ), so a byte written while
 * DRQ is low does not decrement TC.
 */
static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    esp_fifo_push(s, val);

    if (dmalen && s->drq_state) {
        dmalen--;
        esp_set_tc(s, dmalen);
    }
}
259 
/*
 * Begin selection of the target addressed by the low bits of ESP_WBUSID.
 *
 * Returns 0 when a SCSI device exists at that ID (current_dev is set), or
 * -1 after raising a disconnect interrupt when there is none. Any command
 * still in flight is cancelled first.
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in esp_transfer_data() or esp_command_complete()
     */
    return 0;
}
289 
290 static void esp_do_dma(ESPState *s);
291 static void esp_do_nodma(ESPState *s);
292 
/*
 * Dispatch the CDB accumulated in cmdfifo to the selected target/LUN and
 * enter the DATA phase whose direction matches the sign of the length
 * returned by scsi_req_enqueue() (positive = target to initiator).
 */
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    /* buf is sized to the cmdfifo, so the pop below cannot overflow it */
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    s->data_ready = false;
    if (datalen != 0) {
        /*
         * Switch to DATA phase but wait until initial data xfer is
         * complete before raising the command completion interrupt
         */
        if (datalen > 0) {
            esp_set_phase(s, STAT_DI);
        } else {
            esp_set_phase(s, STAT_DO);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
336 
/*
 * Consume the MESSAGE OUT bytes sitting at the head of cmdfifo: the first
 * byte is an IDENTIFY message carrying the LUN in its low 3 bits; any
 * further message bytes (extended messages) are dropped.
 */
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        /* An empty cmdfifo yields message 0 rather than underflowing */
        uint8_t message = fifo8_is_empty(&s->cmdfifo) ? 0 :
                          fifo8_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        fifo8_drop(&s->cmdfifo, len);
        s->cmdfifo_cdb_offset = 0;
    }
}
355 
/*
 * Process the contents of cmdfifo: strip any MESSAGE OUT bytes first,
 * then hand the remaining CDB to the command phase handler.
 */
static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
362 
363 static void handle_satn(ESPState *s)
364 {
365     if (s->dma && !s->dma_enabled) {
366         s->dma_cb = handle_satn;
367         return;
368     }
369 
370     if (esp_select(s) < 0) {
371         return;
372     }
373 
374     esp_set_phase(s, STAT_MO);
375 
376     if (s->dma) {
377         esp_do_dma(s);
378     } else {
379         esp_do_nodma(s);
380     }
381 }
382 
383 static void handle_s_without_atn(ESPState *s)
384 {
385     if (s->dma && !s->dma_enabled) {
386         s->dma_cb = handle_s_without_atn;
387         return;
388     }
389 
390     if (esp_select(s) < 0) {
391         return;
392     }
393 
394     esp_set_phase(s, STAT_CD);
395     s->cmdfifo_cdb_offset = 0;
396 
397     if (s->dma) {
398         esp_do_dma(s);
399     } else {
400         esp_do_nodma(s);
401     }
402 }
403 
404 static void handle_satn_stop(ESPState *s)
405 {
406     if (s->dma && !s->dma_enabled) {
407         s->dma_cb = handle_satn_stop;
408         return;
409     }
410 
411     if (esp_select(s) < 0) {
412         return;
413     }
414 
415     esp_set_phase(s, STAT_MO);
416     s->cmdfifo_cdb_offset = 0;
417 
418     if (s->dma) {
419         esp_do_dma(s);
420     } else {
421         esp_do_nodma(s);
422     }
423 }
424 
425 static void handle_pad(ESPState *s)
426 {
427     if (s->dma) {
428         esp_do_dma(s);
429     } else {
430         esp_do_nodma(s);
431     }
432 }
433 
434 static void write_response(ESPState *s)
435 {
436     trace_esp_write_response(s->status);
437 
438     if (s->dma) {
439         esp_do_dma(s);
440     } else {
441         esp_do_nodma(s);
442     }
443 }
444 
/*
 * Return true once cmdfifo holds a complete CDB past the message bytes at
 * cmdfifo_cdb_offset, using scsi_cdb_length() on the opcode to determine
 * the expected length.
 */
static bool esp_cdb_ready(ESPState *s)
{
    int len = fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset;
    const uint8_t *pbuf;
    uint32_t n;
    int cdblen;

    if (len <= 0) {
        return false;
    }

    pbuf = fifo8_peek_bufptr(&s->cmdfifo, len, &n);
    if (n < len) {
        /*
         * In normal use the cmdfifo should never wrap, but include this check
         * to prevent a malicious guest from reading past the end of the
         * cmdfifo data buffer below
         */
        return false;
    }

    /* Negative cdblen means the opcode group is unknown */
    cdblen = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);

    return cdblen < 0 ? false : (len >= cdblen);
}
470 
471 static void esp_dma_ti_check(ESPState *s)
472 {
473     if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) {
474         s->rregs[ESP_RINTR] |= INTR_BS;
475         esp_raise_irq(s);
476     }
477 }
478 
/*
 * Advance a DMA transfer according to the current SCSI bus phase and the
 * command latched in ESP_CMD.
 *
 * Data moves either through the dma_memory_read/write callbacks (true DMA,
 * when the board provides them) or through the FIFO (PDMA, when it does
 * not). In the PDMA case the length actually moved is bounded by the FIFO
 * fill level rather than by TC.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        /* MESSAGE OUT: accumulate identify/message bytes into cmdfifo */
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        /* COMMAND: accumulate CDB bytes into cmdfifo until TC runs out */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;

    case STAT_DO:
        /* DATA OUT: move initiator data into the target's async buffer */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        /* DATA IN: move target data out to memory or the FIFO */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                s->dma_memory_write(s->dma_opaque, s->async_buf, len);
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                esp_fifo_push_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        /* STATUS: ICCS transfers the single status byte */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        /* MESSAGE IN: ICCS transfers the final (zero) message byte */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}
728 
/*
 * Non-DMA DATA OUT: drain bytes already accumulated in the FIFO into the
 * current request's async buffer, then either complete the SCSI transfer
 * or raise a bus service interrupt to request more data.
 */
static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    /* Bounded by the async buffer, the FIFO size and the FIFO fill level */
    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(s, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        /* Hand the filled buffer back to the SCSI layer */
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
755 
/*
 * Advance a non-DMA (programmed I/O) transfer according to the current
 * SCSI bus phase and the command latched in ESP_CMD. All data moves
 * through the 16-byte FIFO under guest register control.
 */
static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf,
                                   MIN(fifo8_num_used(&s->fifo), 1));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;

        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contain entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0) {
            /* Defer until data is available.  */
            return;
        }
        /* Feed the FIFO one byte at a time for the guest to read out */
        if (fifo8_is_empty(&s->fifo)) {
            esp_fifo_push(s, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            /* Deliver the status byte then move to MESSAGE IN */
            esp_fifo_push(s, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            /* Final message byte is always zero (COMMAND COMPLETE) here */
            esp_fifo_push(s, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}
931 
/*
 * SCSI layer callback: the current request has finished. Record the status
 * byte, switch the bus to STATUS phase, raise the appropriate completion
 * interrupts for the latched command and release the request.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */
    s->ti_size = 0;

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        /* Terminate any in-progress transfer information command */
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
991 
/*
 * SCSI layer callback: a data buffer of len bytes is ready for transfer.
 * On the first call for a request, raise the interrupt that was deferred
 * by esp_select(); then resume any TI command that is waiting for data.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
             s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
             s->rregs[ESP_RSEQ] = SEQ_CD;
             esp_raise_irq(s);
             break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
             s->rregs[ESP_RINTR] |= INTR_BS;
             s->rregs[ESP_RSEQ] = SEQ_MO;
             esp_raise_irq(s);
             break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * If the final COMMAND phase data was transferred using a TI
             * command, clear ESP_CMD to terminate the TI command and raise
             * the completion interrupt
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */

    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}
1060 
1061 static void handle_ti(ESPState *s)
1062 {
1063     uint32_t dmalen;
1064 
1065     if (s->dma && !s->dma_enabled) {
1066         s->dma_cb = handle_ti;
1067         return;
1068     }
1069 
1070     if (s->dma) {
1071         dmalen = esp_get_tc(s);
1072         trace_esp_handle_ti(dmalen);
1073         esp_do_dma(s);
1074     } else {
1075         trace_esp_handle_ti(s->ti_size);
1076         esp_do_nodma(s);
1077 
1078         if (esp_get_phase(s) == STAT_DO) {
1079             esp_nodma_ti_dataout(s);
1080         }
1081     }
1082 }
1083 
1084 void esp_hard_reset(ESPState *s)
1085 {
1086     memset(s->rregs, 0, ESP_REGS);
1087     memset(s->wregs, 0, ESP_REGS);
1088     s->tchi_written = 0;
1089     s->ti_size = 0;
1090     s->async_len = 0;
1091     fifo8_reset(&s->fifo);
1092     fifo8_reset(&s->cmdfifo);
1093     s->dma = 0;
1094     s->dma_cb = NULL;
1095 
1096     s->rregs[ESP_CFG1] = 7;
1097 }
1098 
1099 static void esp_soft_reset(ESPState *s)
1100 {
1101     qemu_irq_lower(s->irq);
1102     qemu_irq_lower(s->drq_irq);
1103     esp_hard_reset(s);
1104 }
1105 
1106 static void esp_bus_reset(ESPState *s)
1107 {
1108     bus_cold_reset(BUS(&s->bus));
1109 }
1110 
1111 static void parent_esp_reset(ESPState *s, int irq, int level)
1112 {
1113     if (level) {
1114         esp_soft_reset(s);
1115     }
1116 }
1117 
/*
 * Execute the command currently latched in ESP_CMD.
 *
 * CMD_DMA (bit 7) selects DMA mode: the transfer counter is then
 * reloaded from the start transfer count registers, where a programmed
 * value of 0 selects the maximum of 0x10000 bytes. The low bits
 * (CMD_CMD) select the actual operation.
 */
static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter.  */
        if (esp_get_stc(s) == 0) {
            /* An STC of 0 means the maximum transfer count of 0x10000 */
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        /* Flush FIFO: discard any bytes held in the data FIFO */
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        /* Reset chip: equivalent to a soft reset */
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        /*
         * Reset the SCSI bus; unless interrupt reporting is suppressed
         * via CFG1_RESREPT, raise the reset interrupt
         */
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        /* Transfer information: start (or defer) an information phase */
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        /* Initiator command complete sequence */
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        break;
    case CMD_MSGACC:
        /* Message accepted: report disconnect, clear sequence and FIFO flags */
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        /* Set ATN: traced only, no further action in this emulation */
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        /* Reset ATN: traced only, no further action in this emulation */
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        /* Select without ATN */
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        /* Select with ATN */
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        /* Select with ATN and stop */
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        /* Enable selection: clear any pending interrupt state */
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        /* Disable selection: clear pending interrupts and re-raise IRQ */
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}
1204 
1205 uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
1206 {
1207     uint32_t val;
1208 
1209     switch (saddr) {
1210     case ESP_FIFO:
1211         s->rregs[ESP_FIFO] = esp_fifo_pop(s);
1212         val = s->rregs[ESP_FIFO];
1213         break;
1214     case ESP_RINTR:
1215         /*
1216          * Clear sequence step, interrupt register and all status bits
1217          * except TC
1218          */
1219         val = s->rregs[ESP_RINTR];
1220         s->rregs[ESP_RINTR] = 0;
1221         esp_lower_irq(s);
1222         s->rregs[ESP_RSTAT] &= STAT_TC | 7;
1223         /*
1224          * According to the datasheet ESP_RSEQ should be cleared, but as the
1225          * emulation currently defers information transfers to the next TI
1226          * command leave it for now so that pedantic guests such as the old
1227          * Linux 2.6 driver see the correct flags before the next SCSI phase
1228          * transition.
1229          *
1230          * s->rregs[ESP_RSEQ] = SEQ_0;
1231          */
1232         break;
1233     case ESP_TCHI:
1234         /* Return the unique id if the value has never been written */
1235         if (!s->tchi_written) {
1236             val = s->chip_id;
1237         } else {
1238             val = s->rregs[saddr];
1239         }
1240         break;
1241      case ESP_RFLAGS:
1242         /* Bottom 5 bits indicate number of bytes in FIFO */
1243         val = fifo8_num_used(&s->fifo);
1244         break;
1245     default:
1246         val = s->rregs[saddr];
1247         break;
1248     }
1249 
1250     trace_esp_mem_readb(saddr, val);
1251     return val;
1252 }
1253 
/*
 * Write an ESP register.
 *
 * Most registers simply latch the value into wregs[]; a few have
 * immediate side effects (FIFO push, command execution, TC status
 * clearing). Writes to invalid register numbers are traced and
 * discarded without updating wregs[] (note the early return in the
 * default case).
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        /* Remember TCHI was written so reads stop returning the chip id */
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Writing any transfer count byte clears the TC status bit */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        /* Push into the FIFO (dropped silently when full) */
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(s, val);
        }
        /* New FIFO data may allow a pending non-DMA transfer to progress */
        esp_do_nodma(s);
        break;
    case ESP_CMD:
        /* Latch and immediately execute the command */
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Configuration registers are readable back via rregs */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
1290 
1291 static bool esp_mem_accepts(void *opaque, hwaddr addr,
1292                             unsigned size, bool is_write,
1293                             MemTxAttrs attrs)
1294 {
1295     return (size == 1) || (is_write && size == 4);
1296 }
1297 
1298 static bool esp_is_before_version_5(void *opaque, int version_id)
1299 {
1300     ESPState *s = ESP(opaque);
1301 
1302     version_id = MIN(version_id, s->mig_version_id);
1303     return version_id < 5;
1304 }
1305 
1306 static bool esp_is_version_5(void *opaque, int version_id)
1307 {
1308     ESPState *s = ESP(opaque);
1309 
1310     version_id = MIN(version_id, s->mig_version_id);
1311     return version_id >= 5;
1312 }
1313 
1314 static bool esp_is_version_6(void *opaque, int version_id)
1315 {
1316     ESPState *s = ESP(opaque);
1317 
1318     version_id = MIN(version_id, s->mig_version_id);
1319     return version_id >= 6;
1320 }
1321 
1322 static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
1323 {
1324     ESPState *s = ESP(opaque);
1325 
1326     version_id = MIN(version_id, s->mig_version_id);
1327     return version_id >= 5 && version_id <= 6;
1328 }
1329 
/*
 * pre_save handler shared by the ESP parent devices: resolve the
 * embedded "esp" child object and record the vmstate version in use, so
 * the esp_is_* predicates and esp_post_load() can tell which version
 * produced the migration stream.
 */
int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
1338 
1339 static int esp_post_load(void *opaque, int version_id)
1340 {
1341     ESPState *s = ESP(opaque);
1342     int len, i;
1343 
1344     version_id = MIN(version_id, s->mig_version_id);
1345 
1346     if (version_id < 5) {
1347         esp_set_tc(s, s->mig_dma_left);
1348 
1349         /* Migrate ti_buf to fifo */
1350         len = s->mig_ti_wptr - s->mig_ti_rptr;
1351         for (i = 0; i < len; i++) {
1352             fifo8_push(&s->fifo, s->mig_ti_buf[i]);
1353         }
1354 
1355         /* Migrate cmdbuf to cmdfifo */
1356         for (i = 0; i < s->mig_cmdlen; i++) {
1357             fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
1358         }
1359     }
1360 
1361     s->mig_version_id = vmstate_esp.version_id;
1362     return 0;
1363 }
1364 
/*
 * Migration state for the ESP core (current version 7, accepts >= 3).
 *
 * Fields guarded by the esp_is_* predicates keep compatibility with
 * streams produced by older QEMU versions: pre-version-5 streams used
 * raw mig_* buffers/counters, which esp_post_load() converts into the
 * current FIFO-based representation.
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 7,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        /* Legacy (< v5) transfer buffer state */
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /* Legacy (< v5) command buffer, migrated in two version-gated parts */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        /* v5+ FIFO-based representation */
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        /* v6+ migrates the selected LUN */
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_END_OF_LIST()
    },
};
1402 
1403 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
1404                                  uint64_t val, unsigned int size)
1405 {
1406     SysBusESPState *sysbus = opaque;
1407     ESPState *s = ESP(&sysbus->esp);
1408     uint32_t saddr;
1409 
1410     saddr = addr >> sysbus->it_shift;
1411     esp_reg_write(s, saddr, val);
1412 }
1413 
1414 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
1415                                     unsigned int size)
1416 {
1417     SysBusESPState *sysbus = opaque;
1418     ESPState *s = ESP(&sysbus->esp);
1419     uint32_t saddr;
1420 
1421     saddr = addr >> sysbus->it_shift;
1422     return esp_reg_read(s, saddr);
1423 }
1424 
/*
 * Register window ops. esp_mem_accepts() restricts accesses to single
 * bytes (read or write) and 32-bit writes.
 */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1431 
1432 static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
1433                                   uint64_t val, unsigned int size)
1434 {
1435     SysBusESPState *sysbus = opaque;
1436     ESPState *s = ESP(&sysbus->esp);
1437 
1438     trace_esp_pdma_write(size);
1439 
1440     switch (size) {
1441     case 1:
1442         esp_pdma_write(s, val);
1443         break;
1444     case 2:
1445         esp_pdma_write(s, val >> 8);
1446         esp_pdma_write(s, val);
1447         break;
1448     }
1449     esp_do_dma(s);
1450 }
1451 
1452 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
1453                                      unsigned int size)
1454 {
1455     SysBusESPState *sysbus = opaque;
1456     ESPState *s = ESP(&sysbus->esp);
1457     uint64_t val = 0;
1458 
1459     trace_esp_pdma_read(size);
1460 
1461     switch (size) {
1462     case 1:
1463         val = esp_pdma_read(s);
1464         break;
1465     case 2:
1466         val = esp_pdma_read(s);
1467         val = (val << 8) | esp_pdma_read(s);
1468         break;
1469     }
1470     esp_do_dma(s);
1471     return val;
1472 }
1473 
/*
 * SCSIBusInfo.load_request callback: reattach a SCSI request restored
 * from a migration stream. Takes a reference on the request and makes
 * it the controller's current request; the ESPState itself is returned
 * as the opaque value associated with the request.
 */
static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}
1482 
/*
 * PDMA window ops. Guests may issue 1-4 byte accesses (valid sizes),
 * but the handlers implement at most 2 bytes at a time (impl sizes), so
 * the memory core splits wider accesses automatically.
 */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1492 
/*
 * SCSI bus callbacks for the ESP HBA: no tagged command queueing, up to
 * ESP_MAX_DEVS targets with LUNs 0..7.
 */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1503 
1504 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1505 {
1506     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1507     ESPState *s = ESP(&sysbus->esp);
1508 
1509     switch (irq) {
1510     case 0:
1511         parent_esp_reset(s, irq, level);
1512         break;
1513     case 1:
1514         esp_dma_enable(s, irq, level);
1515         break;
1516     }
1517 }
1518 
/*
 * Realize the sysbus ESP wrapper: realize the embedded ESP core, export
 * its IRQ and DRQ lines as sysbus IRQs, map two MMIO regions (the
 * register bank, scaled by it_shift, and the 4-byte PDMA port), create
 * the two GPIO inputs handled by sysbus_esp_gpio_demux(), and
 * initialize the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    /* Realize the embedded core first; bail out on failure */
    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->drq_irq);
    /* it_shift must have been set by the board before realize */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}
1545 
1546 static void sysbus_esp_hard_reset(DeviceState *dev)
1547 {
1548     SysBusESPState *sysbus = SYSBUS_ESP(dev);
1549     ESPState *s = ESP(&sysbus->esp);
1550 
1551     esp_hard_reset(s);
1552 }
1553 
1554 static void sysbus_esp_init(Object *obj)
1555 {
1556     SysBusESPState *sysbus = SYSBUS_ESP(obj);
1557 
1558     object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
1559 }
1560 
/*
 * Migration state for the sysbus wrapper. Since version 2 the vmstate
 * version in use (recorded by esp_pre_save()) is sent ahead of the ESP
 * core state, so the core's version predicates can evaluate the stream
 * correctly on load.
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1572 
1573 static void sysbus_esp_class_init(ObjectClass *klass, const void *data)
1574 {
1575     DeviceClass *dc = DEVICE_CLASS(klass);
1576 
1577     dc->realize = sysbus_esp_realize;
1578     device_class_set_legacy_reset(dc, sysbus_esp_hard_reset);
1579     dc->vmsd = &vmstate_sysbus_esp_scsi;
1580     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1581 }
1582 
1583 static void esp_finalize(Object *obj)
1584 {
1585     ESPState *s = ESP(obj);
1586 
1587     fifo8_destroy(&s->fifo);
1588     fifo8_destroy(&s->cmdfifo);
1589 }
1590 
1591 static void esp_init(Object *obj)
1592 {
1593     ESPState *s = ESP(obj);
1594 
1595     fifo8_create(&s->fifo, ESP_FIFO_SZ);
1596     fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
1597 }
1598 
1599 static void esp_class_init(ObjectClass *klass, const void *data)
1600 {
1601     DeviceClass *dc = DEVICE_CLASS(klass);
1602 
1603     /* internal device for sysbusesp/pciespscsi, not user-creatable */
1604     dc->user_creatable = false;
1605     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1606 }
1607 
/*
 * QOM type registration: TYPE_SYSBUS_ESP is the board-facing sysbus
 * device, TYPE_ESP is the internal core it embeds (marked not
 * user-creatable in esp_class_init()).
 */
static const TypeInfo esp_info_types[] = {
    {
        .name          = TYPE_SYSBUS_ESP,
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init    = sysbus_esp_class_init,
    },
    {
        .name = TYPE_ESP,
        .parent = TYPE_DEVICE,
        .instance_init = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size = sizeof(ESPState),
        .class_init = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)
1627