xref: /openbmc/qemu/hw/scsi/esp.c (revision 64c6e744)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  * Copyright (c) 2023 Mark Cave-Ayland
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a copy
9  * of this software and associated documentation files (the "Software"), to deal
10  * in the Software without restriction, including without limitation the rights
11  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12  * copies of the Software, and to permit persons to whom the Software is
13  * furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included in
16  * all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24  * THE SOFTWARE.
25  */
26 
27 #include "qemu/osdep.h"
28 #include "hw/sysbus.h"
29 #include "migration/vmstate.h"
30 #include "hw/irq.h"
31 #include "hw/scsi/esp.h"
32 #include "trace.h"
33 #include "qemu/log.h"
34 #include "qemu/module.h"
35 
36 /*
37  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
38  * also produced as NCR89C100. See
39  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
40  * and
41  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
42  *
43  * On Macintosh Quadra it is a NCR53C96.
44  */
45 
46 static void esp_raise_irq(ESPState *s)
47 {
48     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
49         s->rregs[ESP_RSTAT] |= STAT_INT;
50         qemu_irq_raise(s->irq);
51         trace_esp_raise_irq();
52     }
53 }
54 
55 static void esp_lower_irq(ESPState *s)
56 {
57     if (s->rregs[ESP_RSTAT] & STAT_INT) {
58         s->rregs[ESP_RSTAT] &= ~STAT_INT;
59         qemu_irq_lower(s->irq);
60         trace_esp_lower_irq();
61     }
62 }
63 
64 static void esp_raise_drq(ESPState *s)
65 {
66     if (!(s->drq_state)) {
67         qemu_irq_raise(s->drq_irq);
68         trace_esp_raise_drq();
69         s->drq_state = true;
70     }
71 }
72 
73 static void esp_lower_drq(ESPState *s)
74 {
75     if (s->drq_state) {
76         qemu_irq_lower(s->drq_irq);
77         trace_esp_lower_drq();
78         s->drq_state = false;
79     }
80 }
81 
82 void esp_dma_enable(ESPState *s, int irq, int level)
83 {
84     if (level) {
85         s->dma_enabled = 1;
86         trace_esp_dma_enable();
87         if (s->dma_cb) {
88             s->dma_cb(s);
89             s->dma_cb = NULL;
90         }
91     } else {
92         trace_esp_dma_disable();
93         s->dma_enabled = 0;
94     }
95 }
96 
97 void esp_request_cancelled(SCSIRequest *req)
98 {
99     ESPState *s = req->hba_private;
100 
101     if (req == s->current_req) {
102         scsi_req_unref(s->current_req);
103         s->current_req = NULL;
104         s->current_dev = NULL;
105         s->async_len = 0;
106     }
107 }
108 
109 static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
110 {
111     if (fifo8_num_used(fifo) == fifo->capacity) {
112         trace_esp_error_fifo_overrun();
113         return;
114     }
115 
116     fifo8_push(fifo, val);
117 }
118 
119 static uint8_t esp_fifo_pop(Fifo8 *fifo)
120 {
121     if (fifo8_is_empty(fifo)) {
122         return 0;
123     }
124 
125     return fifo8_pop(fifo);
126 }
127 
/*
 * Pop up to maxlen bytes from fifo into dest (dest may be NULL to simply
 * discard the bytes). fifo8_pop_buf() only returns a contiguous view of
 * the ring buffer, so a second pop is needed when the queued data wraps
 * around. Returns the number of bytes actually popped.
 */
static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n, n2;
    int len;

    if (maxlen == 0) {
        return 0;
    }

    /* First pop: contiguous run up to the end of the ring buffer */
    len = maxlen;
    buf = fifo8_pop_buf(fifo, len, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    /* Add FIFO wraparound if needed */
    len -= n;
    len = MIN(len, fifo8_num_used(fifo));
    if (len) {
        buf = fifo8_pop_buf(fifo, len, &n2);
        if (dest) {
            memcpy(&dest[n], buf, n2);
        }
        n += n2;
    }

    return n;
}
157 
158 static uint32_t esp_get_tc(ESPState *s)
159 {
160     uint32_t dmalen;
161 
162     dmalen = s->rregs[ESP_TCLO];
163     dmalen |= s->rregs[ESP_TCMID] << 8;
164     dmalen |= s->rregs[ESP_TCHI] << 16;
165 
166     return dmalen;
167 }
168 
169 static void esp_set_tc(ESPState *s, uint32_t dmalen)
170 {
171     uint32_t old_tc = esp_get_tc(s);
172 
173     s->rregs[ESP_TCLO] = dmalen;
174     s->rregs[ESP_TCMID] = dmalen >> 8;
175     s->rregs[ESP_TCHI] = dmalen >> 16;
176 
177     if (old_tc && dmalen == 0) {
178         s->rregs[ESP_RSTAT] |= STAT_TC;
179     }
180 }
181 
182 static uint32_t esp_get_stc(ESPState *s)
183 {
184     uint32_t dmalen;
185 
186     dmalen = s->wregs[ESP_TCLO];
187     dmalen |= s->wregs[ESP_TCMID] << 8;
188     dmalen |= s->wregs[ESP_TCHI] << 16;
189 
190     return dmalen;
191 }
192 
/* Human-readable names for the 3-bit SCSI bus phase, indexed by phase value */
static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};
197 
198 static void esp_set_phase(ESPState *s, uint8_t phase)
199 {
200     s->rregs[ESP_RSTAT] &= ~7;
201     s->rregs[ESP_RSTAT] |= phase;
202 
203     trace_esp_set_phase(esp_phase_names[phase]);
204 }
205 
206 static uint8_t esp_get_phase(ESPState *s)
207 {
208     return s->rregs[ESP_RSTAT] & 7;
209 }
210 
211 static uint8_t esp_pdma_read(ESPState *s)
212 {
213     uint8_t val;
214 
215     val = esp_fifo_pop(&s->fifo);
216     return val;
217 }
218 
219 static void esp_pdma_write(ESPState *s, uint8_t val)
220 {
221     uint32_t dmalen = esp_get_tc(s);
222 
223     if (dmalen == 0) {
224         return;
225     }
226 
227     esp_fifo_push(&s->fifo, val);
228 
229     dmalen--;
230     esp_set_tc(s, dmalen);
231 }
232 
/*
 * Begin (re)selection of the target addressed by the WBUSID register.
 *
 * Cancels any request still in flight, then looks up the target device.
 * Returns 0 on success with s->current_dev set; returns -1 and raises a
 * disconnect interrupt (INTR_DC) if no device exists at that ID.
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in esp_transfer_data() or esp_command_complete()
     */
    return 0;
}
262 
263 static void esp_do_dma(ESPState *s);
264 static void esp_do_nodma(ESPState *s);
265 
/*
 * COMMAND phase: hand the CDB accumulated in cmdfifo to the SCSI layer.
 * Enqueues a new request on the addressed LUN and, if the command moves
 * data, switches to the corresponding DATA phase before starting the
 * transfer. Raises INTR_DC if the LUN does not exist.
 */
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    /* datalen > 0: transfer to host; datalen < 0: transfer from host; 0: none */
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    s->data_ready = false;
    if (datalen != 0) {
        /*
         * Switch to DATA phase but wait until initial data xfer is
         * complete before raising the command completion interrupt
         */
        if (datalen > 0) {
            esp_set_phase(s, STAT_DI);
        } else {
            esp_set_phase(s, STAT_DO);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
309 
/*
 * Consume the MESSAGE OUT bytes queued ahead of the CDB in cmdfifo.
 * The first byte is treated as an IDENTIFY message carrying the LUN;
 * any remaining pre-CDB bytes are discarded.
 */
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        /* IDENTIFY message: the low 3 bits select the LUN */
        uint8_t message = esp_fifo_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}
327 
/*
 * Process a fully-received command: consume the message byte(s) ahead of
 * the CDB, then dispatch the CDB to the SCSI layer.
 */
static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
334 
335 static void handle_satn(ESPState *s)
336 {
337     if (s->dma && !s->dma_enabled) {
338         s->dma_cb = handle_satn;
339         return;
340     }
341 
342     if (esp_select(s) < 0) {
343         return;
344     }
345 
346     esp_set_phase(s, STAT_MO);
347 
348     if (s->dma) {
349         esp_do_dma(s);
350     } else {
351         esp_do_nodma(s);
352     }
353 }
354 
355 static void handle_s_without_atn(ESPState *s)
356 {
357     if (s->dma && !s->dma_enabled) {
358         s->dma_cb = handle_s_without_atn;
359         return;
360     }
361 
362     if (esp_select(s) < 0) {
363         return;
364     }
365 
366     esp_set_phase(s, STAT_CD);
367     s->cmdfifo_cdb_offset = 0;
368 
369     if (s->dma) {
370         esp_do_dma(s);
371     } else {
372         esp_do_nodma(s);
373     }
374 }
375 
376 static void handle_satn_stop(ESPState *s)
377 {
378     if (s->dma && !s->dma_enabled) {
379         s->dma_cb = handle_satn_stop;
380         return;
381     }
382 
383     if (esp_select(s) < 0) {
384         return;
385     }
386 
387     esp_set_phase(s, STAT_MO);
388     s->cmdfifo_cdb_offset = 0;
389 
390     if (s->dma) {
391         esp_do_dma(s);
392     } else {
393         esp_do_nodma(s);
394     }
395 }
396 
397 static void handle_pad(ESPState *s)
398 {
399     if (s->dma) {
400         esp_do_dma(s);
401     } else {
402         esp_do_nodma(s);
403     }
404 }
405 
406 static void write_response(ESPState *s)
407 {
408     trace_esp_write_response(s->status);
409 
410     if (s->dma) {
411         esp_do_dma(s);
412     } else {
413         esp_do_nodma(s);
414     }
415 }
416 
/*
 * Return the CDB length implied by the opcode byte queued in cmdfifo just
 * past the message bytes (cmdfifo_cdb_offset). The fifo contents are only
 * peeked, not consumed, so the CDB stays queued until it is complete.
 *
 * NOTE(review): when cmdlen == cmdfifo_cdb_offset this indexes one byte
 * beyond the peeked data — callers appear to guard on a non-zero/valid
 * result, but confirm at least one CDB byte is present before relying on it.
 */
static int esp_cdb_length(ESPState *s)
{
    const uint8_t *pbuf;
    int cmdlen, len;

    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (cmdlen < s->cmdfifo_cdb_offset) {
        return 0;
    }

    pbuf = fifo8_peek_buf(&s->cmdfifo, cmdlen, NULL);
    len = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);

    return len;
}
432 
433 static void esp_dma_ti_check(ESPState *s)
434 {
435     if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) {
436         s->rregs[ESP_RINTR] |= INTR_BS;
437         esp_raise_irq(s);
438         esp_lower_drq(s);
439     }
440 }
441 
/*
 * Advance the current DMA transfer according to the SCSI bus phase and
 * the command latched in ESP_CMD. Data moves either through the board's
 * dma_memory_read/write callbacks or, when those are absent, through the
 * FIFO in PDMA mode (DRQ is raised to request more bytes from the host).
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    /* len starts as the remaining transfer count (TC) */
    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        /* MESSAGE OUT: gather message bytes into cmdfifo */
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            /* PDMA: drain whatever the host has pushed into the FIFO */
            len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            esp_raise_drq(s);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        /* COMMAND: accumulate CDB bytes into cmdfifo */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_raise_drq(s);
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;

    case STAT_DO:
        /* DATA OUT: move bytes towards the device via async_buf */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
                esp_raise_drq(s);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        /* DATA IN: move bytes from the device towards the host */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                s->dma_memory_write(s->dma_opaque, s->async_buf, len);
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                fifo8_push_all(&s->fifo, s->async_buf, len);
                esp_raise_drq(s);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        /* STATUS: deliver the status byte, then move to MESSAGE IN */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    fifo8_push_all(&s->fifo, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
                esp_lower_drq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        /* MESSAGE IN: deliver the message byte (0) and finish the command */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    fifo8_push_all(&s->fifo, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}
696 
/*
 * Non-DMA TI in the DATA OUT phase: move bytes accumulated in the FIFO
 * into the SCSI layer's async buffer.
 */
static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    /* Copy at most one FIFO's worth, bounded by what is actually queued */
    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        /* Buffer consumed: let the SCSI layer refill or complete */
        scsi_req_continue(s->current_req);
        return;
    }

    /* More room remains: raise bus service to request further FIFO writes */
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
723 
/*
 * Advance a non-DMA (programmed I/O) transfer: all data passes through
 * the chip FIFO, with the guest notified by interrupts rather than DRQ.
 * Behavior depends on the current bus phase and the command in ESP_CMD.
 */
static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        /* MESSAGE OUT: gather message byte(s) into cmdfifo */
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(&s->fifo, buf, 1);
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        /* COMMAND: accumulate the CDB, dispatch when it is complete */
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_length(s) && esp_cdb_length(s) ==
                fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset) {
                    /* Command has been received */
                    do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;

        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_length(s) && esp_cdb_length(s) ==
                fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset) {
                    /* Command has been received */
                    do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contain entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        /* DATA IN: feed the host one byte at a time through the FIFO */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0) {
            /* Defer until data is available.  */
            return;
        }
        if (fifo8_is_empty(&s->fifo)) {
            fifo8_push(&s->fifo, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            /* Buffer consumed: let the SCSI layer refill or complete */
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        /* STATUS: queue the status byte and move on to MESSAGE IN */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            fifo8_push(&s->fifo, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        /* MESSAGE IN: queue the message byte (0) and signal completion */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            fifo8_push(&s->fifo, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}
900 
/*
 * SCSI layer completion callback: the current request has finished.
 * Latch the status byte, switch the bus to STATUS phase and raise the
 * interrupts appropriate to the command that was executing.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            /* Residual transfer count: the guest did not move all the data */
            trace_esp_command_complete_unexpected();
        }
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */
    s->ti_size = 0;

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    /* Ensure DRQ is set correctly for TC underflow or normal completion */
    esp_dma_ti_check(s);

    if (s->current_req) {
        /* Drop our reference; the request may now be freed by the SCSI layer */
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
963 
/*
 * SCSI layer data callback: len bytes are ready in (or wanted from) the
 * request's buffer. On the first call for a request, raise the interrupts
 * that were deferred at selection time; then continue any in-progress TI
 * transfer in the correct DMA/non-DMA mode.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        /* First data notification for this request */
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
             s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
             s->rregs[ESP_RSEQ] = SEQ_CD;
             break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
             s->rregs[ESP_RINTR] |= INTR_BS;
             s->rregs[ESP_RSEQ] = SEQ_MO;
             break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * Bus service interrupt raised because of initial change to
             * DATA phase
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            break;
        }

        esp_raise_irq(s);
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */

    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}
1030 
1031 static void handle_ti(ESPState *s)
1032 {
1033     uint32_t dmalen;
1034 
1035     if (s->dma && !s->dma_enabled) {
1036         s->dma_cb = handle_ti;
1037         return;
1038     }
1039 
1040     if (s->dma) {
1041         dmalen = esp_get_tc(s);
1042         trace_esp_handle_ti(dmalen);
1043         esp_do_dma(s);
1044     } else {
1045         trace_esp_handle_ti(s->ti_size);
1046         esp_do_nodma(s);
1047 
1048         if (esp_get_phase(s) == STAT_DO) {
1049             esp_nodma_ti_dataout(s);
1050         }
1051     }
1052 }
1053 
1054 void esp_hard_reset(ESPState *s)
1055 {
1056     memset(s->rregs, 0, ESP_REGS);
1057     memset(s->wregs, 0, ESP_REGS);
1058     s->tchi_written = 0;
1059     s->ti_size = 0;
1060     s->async_len = 0;
1061     fifo8_reset(&s->fifo);
1062     fifo8_reset(&s->cmdfifo);
1063     s->dma = 0;
1064     s->dma_cb = NULL;
1065 
1066     s->rregs[ESP_CFG1] = 7;
1067 }
1068 
/*
 * Soft (chip) reset: deassert the interrupt and DRQ lines, then reset
 * all chip state.  Unlike a bus reset this does not touch the devices
 * attached to the SCSI bus.
 */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->drq_irq);
    esp_hard_reset(s);
}
1075 
/* SCSI bus reset: cold-reset every device hanging off the ESP's bus */
static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}
1080 
1081 static void parent_esp_reset(ESPState *s, int irq, int level)
1082 {
1083     if (level) {
1084         esp_soft_reset(s);
1085     }
1086 }
1087 
/*
 * Decode and execute the command byte most recently written to the
 * ESP_CMD register.  For DMA command variants the transfer counter is
 * first reloaded from the start transfer count (STC) registers, with a
 * programmed count of zero meaning the maximum of 0x10000 bytes.
 */
static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter.  */
        if (esp_get_stc(s) == 0) {
            /* An STC of zero selects the maximum transfer count */
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        /* Flush FIFO: discard any bytes currently held in the data FIFO */
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        /* Reset the chip itself; the SCSI bus is left alone */
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        /* Reset the SCSI bus; report via interrupt unless suppressed */
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        /* Transfer information: start/continue the current data phase */
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        /* Initiator command complete sequence */
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        break;
    case CMD_MSGACC:
        /* Message accepted: raise the disconnect interrupt */
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        /* Transfer pad: dummy bytes to satisfy the transfer counter */
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        /* Set ATN: no explicit action needed in this emulation */
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        /* Reset ATN: likewise a no-op here */
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        /* Select without ATN */
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        /* Select with ATN */
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        /* Select with ATN and stop sequence */
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        /* Enable selection: clear pending interrupts */
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        /* Disable selection: clear interrupts and re-evaluate IRQ line */
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}
1174 
1175 uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
1176 {
1177     uint32_t val;
1178 
1179     switch (saddr) {
1180     case ESP_FIFO:
1181         s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
1182         val = s->rregs[ESP_FIFO];
1183         break;
1184     case ESP_RINTR:
1185         /*
1186          * Clear sequence step, interrupt register and all status bits
1187          * except TC
1188          */
1189         val = s->rregs[ESP_RINTR];
1190         s->rregs[ESP_RINTR] = 0;
1191         esp_lower_irq(s);
1192         s->rregs[ESP_RSTAT] &= STAT_TC | 7;
1193         /*
1194          * According to the datasheet ESP_RSEQ should be cleared, but as the
1195          * emulation currently defers information transfers to the next TI
1196          * command leave it for now so that pedantic guests such as the old
1197          * Linux 2.6 driver see the correct flags before the next SCSI phase
1198          * transition.
1199          *
1200          * s->rregs[ESP_RSEQ] = SEQ_0;
1201          */
1202         break;
1203     case ESP_TCHI:
1204         /* Return the unique id if the value has never been written */
1205         if (!s->tchi_written) {
1206             val = s->chip_id;
1207         } else {
1208             val = s->rregs[saddr];
1209         }
1210         break;
1211      case ESP_RFLAGS:
1212         /* Bottom 5 bits indicate number of bytes in FIFO */
1213         val = fifo8_num_used(&s->fifo);
1214         break;
1215     default:
1216         val = s->rregs[saddr];
1217         break;
1218     }
1219 
1220     trace_esp_mem_readb(saddr, val);
1221     return val;
1222 }
1223 
/*
 * Write an ESP register.  Register-specific side effects run first;
 * unless the write is rejected as invalid (default case), the value is
 * then latched into the write register bank (wregs) at the end.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        /* Remember an explicit TCHI write so reads stop returning chip_id */
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Writing any transfer count register clears the TC status bit */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        /* Byte is silently dropped if the FIFO is already full */
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(&s->fifo, val);
        }
        esp_do_nodma(s);
        break;
    case ESP_CMD:
        /* Latch the command into rregs before executing it */
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Configuration registers are readable back via rregs */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        /* Invalid register: trace and do NOT latch into wregs */
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
1260 
1261 static bool esp_mem_accepts(void *opaque, hwaddr addr,
1262                             unsigned size, bool is_write,
1263                             MemTxAttrs attrs)
1264 {
1265     return (size == 1) || (is_write && size == 4);
1266 }
1267 
1268 static bool esp_is_before_version_5(void *opaque, int version_id)
1269 {
1270     ESPState *s = ESP(opaque);
1271 
1272     version_id = MIN(version_id, s->mig_version_id);
1273     return version_id < 5;
1274 }
1275 
1276 static bool esp_is_version_5(void *opaque, int version_id)
1277 {
1278     ESPState *s = ESP(opaque);
1279 
1280     version_id = MIN(version_id, s->mig_version_id);
1281     return version_id >= 5;
1282 }
1283 
1284 static bool esp_is_version_6(void *opaque, int version_id)
1285 {
1286     ESPState *s = ESP(opaque);
1287 
1288     version_id = MIN(version_id, s->mig_version_id);
1289     return version_id >= 6;
1290 }
1291 
1292 static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
1293 {
1294     ESPState *s = ESP(opaque);
1295 
1296     version_id = MIN(version_id, s->mig_version_id);
1297     return version_id >= 5 && version_id <= 6;
1298 }
1299 
1300 int esp_pre_save(void *opaque)
1301 {
1302     ESPState *s = ESP(object_resolve_path_component(
1303                       OBJECT(opaque), "esp"));
1304 
1305     s->mig_version_id = vmstate_esp.version_id;
1306     return 0;
1307 }
1308 
/*
 * post_load hook: clamp the incoming version to the version the source
 * claimed, then convert legacy (pre-version-5) state - linear ti/cmd
 * buffers and a separate DMA counter - into the current FIFO-based
 * representation.
 */
static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        /* Restore the transfer counter from the legacy dma_left field */
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        /*
         * NOTE(review): bytes are copied from index 0 rather than from
         * mig_ti_rptr; presumably rptr is always 0 at migration time -
         * confirm against the pre-v5 save path.
         */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    /* From now on treat the state as being at the current version */
    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
1334 
/*
 * Migration state for the ESP core.  Current stream version is 7;
 * streams as old as version 3 can still be loaded.  The mig_* fields
 * carry legacy (pre-version-5) representations which esp_post_load()
 * converts into the current FIFO-based state.
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 7,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        /* Legacy linear transfer buffer (streams older than version 5) */
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /*
         * Legacy command buffer, split in two: the first 16 bytes from
         * version 0 and the remainder only from version 4 onwards
         */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        /* FIFO-based state introduced in version 5 */
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        /* Only carried in versions 5 and 6 */
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_END_OF_LIST()
    },
};
1372 
1373 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
1374                                  uint64_t val, unsigned int size)
1375 {
1376     SysBusESPState *sysbus = opaque;
1377     ESPState *s = ESP(&sysbus->esp);
1378     uint32_t saddr;
1379 
1380     saddr = addr >> sysbus->it_shift;
1381     esp_reg_write(s, saddr, val);
1382 }
1383 
1384 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
1385                                     unsigned int size)
1386 {
1387     SysBusESPState *sysbus = opaque;
1388     ESPState *s = ESP(&sysbus->esp);
1389     uint32_t saddr;
1390 
1391     saddr = addr >> sysbus->it_shift;
1392     return esp_reg_read(s, saddr);
1393 }
1394 
/* MMIO ops for the ESP register bank */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    /* Byte accesses and 32-bit writes only; see esp_mem_accepts() */
    .valid.accepts = esp_mem_accepts,
};
1401 
1402 static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
1403                                   uint64_t val, unsigned int size)
1404 {
1405     SysBusESPState *sysbus = opaque;
1406     ESPState *s = ESP(&sysbus->esp);
1407 
1408     trace_esp_pdma_write(size);
1409 
1410     switch (size) {
1411     case 1:
1412         esp_pdma_write(s, val);
1413         break;
1414     case 2:
1415         esp_pdma_write(s, val >> 8);
1416         esp_pdma_write(s, val);
1417         break;
1418     }
1419     esp_do_dma(s);
1420 }
1421 
1422 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
1423                                      unsigned int size)
1424 {
1425     SysBusESPState *sysbus = opaque;
1426     ESPState *s = ESP(&sysbus->esp);
1427     uint64_t val = 0;
1428 
1429     trace_esp_pdma_read(size);
1430 
1431     switch (size) {
1432     case 1:
1433         val = esp_pdma_read(s);
1434         break;
1435     case 2:
1436         val = esp_pdma_read(s);
1437         val = (val << 8) | esp_pdma_read(s);
1438         break;
1439     }
1440     esp_do_dma(s);
1441     return val;
1442 }
1443 
/*
 * SCSIBusInfo load_request hook: re-attach a SCSI request restored from
 * a migration stream.  Takes a reference on the request, records it as
 * the controller's current request and returns the controller state to
 * the SCSI layer.
 */
static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}
1452 
/* MMIO ops for the PDMA (programmed DMA) data port */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    /* Guests may issue 1-4 byte accesses... */
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    /* ...which the memory core splits into 1- or 2-byte operations */
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1462 
/* SCSI bus callbacks and limits for the ESP-controlled bus */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1473 
1474 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1475 {
1476     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1477     ESPState *s = ESP(&sysbus->esp);
1478 
1479     switch (irq) {
1480     case 0:
1481         parent_esp_reset(s, irq, level);
1482         break;
1483     case 1:
1484         esp_dma_enable(s, irq, level);
1485         break;
1486     }
1487 }
1488 
/*
 * Realize the sysbus ESP wrapper: realize the embedded ESP core,
 * export its IRQ/DRQ lines, map the register and PDMA MMIO regions,
 * wire up the two inbound GPIOs and create the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    /* Realize the child ESP core first; propagate any error */
    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->drq_irq);
    /* it_shift must have been set by the instantiating machine */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    /* Register bank: ESP_REGS registers spaced 1 << it_shift bytes apart */
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    /* GPIO 0: reset line, GPIO 1: DMA enable (see sysbus_esp_gpio_demux) */
    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}
1515 
1516 static void sysbus_esp_hard_reset(DeviceState *dev)
1517 {
1518     SysBusESPState *sysbus = SYSBUS_ESP(dev);
1519     ESPState *s = ESP(&sysbus->esp);
1520 
1521     esp_hard_reset(s);
1522 }
1523 
1524 static void sysbus_esp_init(Object *obj)
1525 {
1526     SysBusESPState *sysbus = SYSBUS_ESP(obj);
1527 
1528     object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
1529 }
1530 
/*
 * Migration state for the sysbus wrapper: the version field of the
 * embedded ESP state (from version 2), followed by the ESP state itself.
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1542 
1543 static void sysbus_esp_class_init(ObjectClass *klass, void *data)
1544 {
1545     DeviceClass *dc = DEVICE_CLASS(klass);
1546 
1547     dc->realize = sysbus_esp_realize;
1548     dc->reset = sysbus_esp_hard_reset;
1549     dc->vmsd = &vmstate_sysbus_esp_scsi;
1550     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1551 }
1552 
1553 static void esp_finalize(Object *obj)
1554 {
1555     ESPState *s = ESP(obj);
1556 
1557     fifo8_destroy(&s->fifo);
1558     fifo8_destroy(&s->cmdfifo);
1559 }
1560 
1561 static void esp_init(Object *obj)
1562 {
1563     ESPState *s = ESP(obj);
1564 
1565     fifo8_create(&s->fifo, ESP_FIFO_SZ);
1566     fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
1567 }
1568 
1569 static void esp_class_init(ObjectClass *klass, void *data)
1570 {
1571     DeviceClass *dc = DEVICE_CLASS(klass);
1572 
1573     /* internal device for sysbusesp/pciespscsi, not user-creatable */
1574     dc->user_creatable = false;
1575     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1576 }
1577 
/*
 * QOM type registration: the sysbus wrapper device and the internal
 * ESP core it embeds.
 */
static const TypeInfo esp_info_types[] = {
    {
        .name          = TYPE_SYSBUS_ESP,
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init    = sysbus_esp_class_init,
    },
    {
        .name = TYPE_ESP,
        .parent = TYPE_DEVICE,
        .instance_init = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size = sizeof(ESPState),
        .class_init = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)
1597