xref: /openbmc/qemu/hw/scsi/esp.c (revision 0d66549c)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "migration/vmstate.h"
29 #include "hw/irq.h"
30 #include "hw/scsi/esp.h"
31 #include "trace.h"
32 #include "qemu/log.h"
33 #include "qemu/module.h"
34 
35 /*
36  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
37  * also produced as NCR89C100. See
38  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
39  * and
40  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
41  *
42  * On Macintosh Quadra it is a NCR53C96.
43  */
44 
45 static void esp_raise_irq(ESPState *s)
46 {
47     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
48         s->rregs[ESP_RSTAT] |= STAT_INT;
49         qemu_irq_raise(s->irq);
50         trace_esp_raise_irq();
51     }
52 }
53 
54 static void esp_lower_irq(ESPState *s)
55 {
56     if (s->rregs[ESP_RSTAT] & STAT_INT) {
57         s->rregs[ESP_RSTAT] &= ~STAT_INT;
58         qemu_irq_lower(s->irq);
59         trace_esp_lower_irq();
60     }
61 }
62 
/* Assert the DMA request (DRQ) line used for PDMA transfers. */
static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}
68 
/* Deassert the DMA request (DRQ) line used for PDMA transfers. */
static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}
74 
75 void esp_dma_enable(ESPState *s, int irq, int level)
76 {
77     if (level) {
78         s->dma_enabled = 1;
79         trace_esp_dma_enable();
80         if (s->dma_cb) {
81             s->dma_cb(s);
82             s->dma_cb = NULL;
83         }
84     } else {
85         trace_esp_dma_disable();
86         s->dma_enabled = 0;
87     }
88 }
89 
90 void esp_request_cancelled(SCSIRequest *req)
91 {
92     ESPState *s = req->hba_private;
93 
94     if (req == s->current_req) {
95         scsi_req_unref(s->current_req);
96         s->current_req = NULL;
97         s->current_dev = NULL;
98         s->async_len = 0;
99     }
100 }
101 
102 static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
103 {
104     if (fifo8_num_used(fifo) == fifo->capacity) {
105         trace_esp_error_fifo_overrun();
106         return;
107     }
108 
109     fifo8_push(fifo, val);
110 }
111 
112 static uint8_t esp_fifo_pop(Fifo8 *fifo)
113 {
114     if (fifo8_is_empty(fifo)) {
115         return 0;
116     }
117 
118     return fifo8_pop(fifo);
119 }
120 
121 static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
122 {
123     const uint8_t *buf;
124     uint32_t n;
125 
126     if (maxlen == 0) {
127         return 0;
128     }
129 
130     buf = fifo8_pop_buf(fifo, maxlen, &n);
131     if (dest) {
132         memcpy(dest, buf, n);
133     }
134 
135     return n;
136 }
137 
138 static uint32_t esp_get_tc(ESPState *s)
139 {
140     uint32_t dmalen;
141 
142     dmalen = s->rregs[ESP_TCLO];
143     dmalen |= s->rregs[ESP_TCMID] << 8;
144     dmalen |= s->rregs[ESP_TCHI] << 16;
145 
146     return dmalen;
147 }
148 
149 static void esp_set_tc(ESPState *s, uint32_t dmalen)
150 {
151     s->rregs[ESP_TCLO] = dmalen;
152     s->rregs[ESP_TCMID] = dmalen >> 8;
153     s->rregs[ESP_TCHI] = dmalen >> 16;
154 }
155 
156 static uint32_t esp_get_stc(ESPState *s)
157 {
158     uint32_t dmalen;
159 
160     dmalen = s->wregs[ESP_TCLO];
161     dmalen |= s->wregs[ESP_TCMID] << 8;
162     dmalen |= s->wregs[ESP_TCHI] << 16;
163 
164     return dmalen;
165 }
166 
167 static uint8_t esp_pdma_read(ESPState *s)
168 {
169     uint8_t val;
170 
171     if (s->do_cmd) {
172         val = esp_fifo_pop(&s->cmdfifo);
173     } else {
174         val = esp_fifo_pop(&s->fifo);
175     }
176 
177     return val;
178 }
179 
180 static void esp_pdma_write(ESPState *s, uint8_t val)
181 {
182     uint32_t dmalen = esp_get_tc(s);
183 
184     if (dmalen == 0) {
185         return;
186     }
187 
188     if (s->do_cmd) {
189         esp_fifo_push(&s->cmdfifo, val);
190     } else {
191         esp_fifo_push(&s->fifo, val);
192     }
193 
194     dmalen--;
195     esp_set_tc(s, dmalen);
196 }
197 
/* Record which PDMA completion callback esp_pdma_cb() should dispatch to. */
static void esp_set_pdma_cb(ESPState *s, enum pdma_cb cb)
{
    s->pdma_cb = cb;
}
202 
/*
 * Select the target encoded in the bus ID write register.
 *
 * Resets the transfer size and data FIFO first.  If no device exists at
 * the target ID, a disconnect interrupt (INTR_DC) is raised and -1 is
 * returned; otherwise INTR_FC is latched (IRQ deliberately deferred, see
 * below) and 0 is returned.
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    fifo8_reset(&s->fifo);

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_command_phase() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}
231 
/*
 * Gather up to maxlen command bytes into cmdfifo and select the target.
 *
 * In DMA mode the bytes come either from the board DMA engine
 * (dma_memory_read) or, when none is wired up, via PDMA: in the PDMA
 * case DRQ is raised and 0 is returned so the transfer completes later
 * through the pdma callback.  In non-DMA mode the bytes are moved from
 * the data FIFO.
 *
 * Returns the number of bytes collected, 0 if none are available yet,
 * or -1 (as uint32_t) if selection failed.
 */
static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    if (s->current_req) {
        /* Started a new command before the old one finished.  Cancel it.  */
        scsi_req_cancel(s->current_req);
    }

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            /* Board DMA engine delivers the bytes synchronously */
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            /* PDMA: select now, raise DRQ and wait for the host CPU */
            if (esp_select(s) < 0) {
                fifo8_reset(&s->cmdfifo);
                return -1;
            }
            esp_raise_drq(s);
            fifo8_reset(&s->cmdfifo);
            return 0;
        }
    } else {
        /* Non-DMA: command bytes were written into the data FIFO */
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        fifo8_reset(&s->cmdfifo);
        return -1;
    }
    return dmalen;
}
279 
/*
 * Hand the CDB accumulated in cmdfifo to the SCSI layer and switch to the
 * appropriate data phase.
 *
 * For DATA IN the command-complete IRQ is deferred until the first data
 * transfer is ready (data_in_ready, see esp_transfer_data()); for DATA OUT
 * the bus-service/function-complete IRQ is raised immediately.
 */
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    /* datalen > 0: device-to-host (DATA IN); < 0: host-to-device (DATA OUT) */
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
330 
/*
 * Consume the MESSAGE OUT bytes that precede the CDB in cmdfifo: the first
 * is the IDENTIFY message carrying the LUN; any further message bytes
 * (extended messages) are currently discarded.
 */
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = esp_fifo_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        /* Low 3 bits of the IDENTIFY message select the LUN */
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}
348 
/* Process message-out bytes then dispatch the CDB to the SCSI layer. */
static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    /* All message bytes must have been consumed before the command phase */
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
355 
356 static void satn_pdma_cb(ESPState *s)
357 {
358     if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
359         s->cmdfifo_cdb_offset = 1;
360         s->do_cmd = 0;
361         do_cmd(s);
362     }
363 }
364 
/*
 * SELECT WITH ATN command: collect the IDENTIFY byte plus CDB and run it.
 * Deferred via dma_cb if DMA is requested but currently disabled.
 */
static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    esp_set_pdma_cb(s, SATN_PDMA_CB);
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        /* One message byte (IDENTIFY) precedes the CDB */
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}
386 
387 static void s_without_satn_pdma_cb(ESPState *s)
388 {
389     if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
390         s->cmdfifo_cdb_offset = 0;
391         s->do_cmd = 0;
392         do_cmd(s);
393     }
394 }
395 
/*
 * SELECT WITHOUT ATN command: collect the CDB (no IDENTIFY message) and
 * run it.  Deferred via dma_cb if DMA is requested but currently disabled.
 */
static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    esp_set_pdma_cb(s, S_WITHOUT_SATN_PDMA_CB);
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        /* No message byte: the CDB starts at offset 0 */
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}
417 
418 static void satn_stop_pdma_cb(ESPState *s)
419 {
420     if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
421         trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
422         s->do_cmd = 1;
423         s->cmdfifo_cdb_offset = 1;
424         s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
425         s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
426         s->rregs[ESP_RSEQ] = SEQ_CD;
427         esp_raise_irq(s);
428     }
429 }
430 
/*
 * SELECT WITH ATN AND STOP command: transfer only the single IDENTIFY
 * message byte, then stop in message-out phase awaiting the CDB.
 * Deferred via dma_cb if DMA is requested but currently disabled.
 */
static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    esp_set_pdma_cb(s, SATN_STOP_PDMA_CB);
    /* Only the one message byte is transferred by this command */
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}
456 
/*
 * PDMA completion for ICCS (write_response): the status/message pair has
 * been read out, so report transfer complete in status phase.
 */
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
464 
/*
 * ICCS (Initiator Command Complete Sequence): deliver the two-byte
 * status + message (0 = COMMAND COMPLETE) pair, either through the board
 * DMA engine, via PDMA (DRQ + callback), or through the data FIFO for
 * non-DMA operation.
 */
static void write_response(ESPState *s)
{
    uint8_t buf[2];

    trace_esp_write_response(s->status);

    buf[0] = s->status;
    buf[1] = 0;     /* COMMAND COMPLETE message byte */

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            /* PDMA: the host will read the bytes; finish in the callback */
            esp_set_pdma_cb(s, WRITE_RESPONSE_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        fifo8_reset(&s->fifo);
        fifo8_push_all(&s->fifo, buf, 2);
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
492 
/*
 * Signal completion of a DMA transfer: set terminal count, zero the
 * transfer counter and FIFO flags, and raise a bus-service interrupt.
 */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}
501 
/*
 * PDMA completion callback for TI (transfer information) DMA transfers.
 *
 * Handles three cases: completing command/message-out bytes while a
 * command is being assembled (do_cmd), moving host-written FIFO data to
 * the device (DATA OUT), and refilling the FIFO with device data for the
 * host to read (DATA IN).
 */
static void do_dma_pdma_cb(ESPState *s)
{
    /* Low 3 status bits encode the current SCSI phase */
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* Ensure we have received complete command after SATN and stop */
        if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) {
            return;
        }

        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            /* SCSI buffer consumed; let the SCSI layer refill it */
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, esp_get_tc(s));
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);

            if (esp_get_tc(s) == 0) {
                /* Indicate transfer to FIFO is complete */
                s->rregs[ESP_RSTAT] |= STAT_TC;
            }
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately.  */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}
601 
/*
 * Perform one step of a DMA transfer-information command.
 *
 * If a command is still being assembled (do_cmd), command bytes are moved
 * into cmdfifo and the command dispatched.  Otherwise data is moved
 * between the SCSI layer's async buffer and either the board DMA engine
 * or, for PDMA, the chip FIFO (with DRQ raised for the host CPU).
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    /* Low 3 status bits encode the current SCSI phase */
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != NULL (see the assert())
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            /* PDMA: wait for the host CPU to supply the bytes */
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            /* PDMA DATA OUT: host CPU writes into the FIFO */
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
    esp_lower_drq(s);
}
727 
/*
 * Perform one step of a non-DMA (programmed I/O) transfer-information
 * command, moving bytes between the chip FIFO and the SCSI layer's async
 * buffer one FIFO-load (DATA OUT) or one byte (DATA IN) at a time.
 */
static void esp_do_nodma(ESPState *s)
{
    /* Low 3 status bits encode the current SCSI phase */
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t cmdlen;
    int len;

    if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }

    if (to_device) {
        /* DATA OUT: drain whatever the host wrote into the FIFO */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        /* DATA IN: present the next byte; the host reads it via ESP_FIFO */
        if (fifo8_is_empty(&s->fifo)) {
            fifo8_push(&s->fifo, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }
    }

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
794 
/*
 * Dispatch the pending PDMA completion callback selected by
 * esp_set_pdma_cb().  Kept as an enum + switch (rather than a function
 * pointer) so the state is migratable.
 */
static void esp_pdma_cb(ESPState *s)
{
    switch (s->pdma_cb) {
    case SATN_PDMA_CB:
        satn_pdma_cb(s);
        break;
    case S_WITHOUT_SATN_PDMA_CB:
        s_without_satn_pdma_cb(s);
        break;
    case SATN_STOP_PDMA_CB:
        satn_stop_pdma_cb(s);
        break;
    case WRITE_RESPONSE_PDMA_CB:
        write_response_pdma_cb(s);
        break;
    case DO_DMA_PDMA_CB:
        do_dma_pdma_cb(s);
        break;
    default:
        g_assert_not_reached();
    }
}
817 
/*
 * SCSI layer callback: the current request has completed.  Record the
 * status byte, switch to status phase when the transfer is finished, and
 * release the request.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    /* Low 3 status bits encode the current SCSI phase */
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
        s->ti_size = 0;
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * If the transfer is finished, switch to status phase. For non-DMA
     * transfers from the target the last byte is still in the FIFO
     */
    if (s->ti_size == 0) {
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        esp_dma_done(s);
        esp_lower_drq(s);
    }

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
858 
/*
 * SCSI layer callback: len bytes of request data are now available in the
 * request buffer.  Latches the buffer, raises the deferred DATA IN
 * completion interrupt on the first call, and resumes the in-flight TI
 * command (DMA or non-DMA) if one was issued.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    /* Low 3 status bits encode the current SCSI phase */
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd == (CMD_TI | CMD_DMA)) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else if (s->ti_cmd == CMD_TI) {
        esp_do_nodma(s);
    }
}
907 
/*
 * TRANSFER INFORMATION command: start (or defer, if DMA is gated off) a
 * DMA or non-DMA information transfer.  The issuing command byte is saved
 * in ti_cmd so esp_transfer_data() can resume the right variant later.
 */
static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}
928 
/*
 * Full chip reset: clear all registers, FIFOs and transfer state.
 * CFG1 is preset to 7 (the chip's own bus ID).
 */
void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}
944 
/* RESET CHIP command: drop both output lines, then do a full chip reset. */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}
951 
/* RESET SCSI BUS command: cold-reset every device on the attached bus. */
static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}
956 
957 static void parent_esp_reset(ESPState *s, int irq, int level)
958 {
959     if (level) {
960         esp_soft_reset(s);
961     }
962 }
963 
/*
 * Read one ESP register.
 *
 * ESP_FIFO pops a byte (and drives the non-DMA DATA IN state machine),
 * ESP_RINTR clears the interrupt state as a side effect, ESP_TCHI returns
 * the chip ID until first written, and ESP_RFLAGS reports the FIFO level.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
                (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out.  */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) {
                if (s->ti_size) {
                    /* More data pending: refill the FIFO with the next byte */
                    esp_do_nodma(s);
                } else {
                    /*
                     * The last byte of a non-DMA transfer has been read out
                     * of the FIFO so switch to status phase
                     */
                    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
                }
            }
            s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
1030 
/*
 * Handle a guest write to an ESP register.
 *
 * @s:     device state
 * @saddr: register index (one of the ESP_* register defines)
 * @val:   value written (only the low bits are meaningful for 8-bit regs)
 *
 * Valid writes are mirrored into wregs[saddr] at the end; writes to
 * invalid registers are traced and discarded.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        /* Record that TCHI was written so reads return it, not chip_id */
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Writing any transfer-count byte clears the TC status bit */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            /* Command bytes being accumulated go to cmdfifo, not fifo */
            esp_fifo_push(&s->cmdfifo, val);

            /*
             * If any unexpected message out/command phase data is
             * transferred using non-DMA, raise the interrupt
             */
            if (s->rregs[ESP_CMD] == CMD_TI) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
        } else {
            esp_fifo_push(&s->fifo, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.  */
            if (esp_get_stc(s) == 0) {
                /* A start transfer count of zero means the maximum, 64K */
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        /* Decode the command field (DMA bit stripped) */
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            /* Flush FIFO: discard any buffered data bytes */
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            /* Chip (soft) reset */
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            /* SCSI bus reset; interrupt unless reporting is disabled */
            trace_esp_mem_writeb_cmd_bus_reset(val);
            esp_bus_reset(s);
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            /* Transfer information: move data for the current phase */
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            /*
             * Initiator command complete sequence: send status + message
             * bytes, flag function complete and enter message-in phase
             */
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            /* Message accepted: report disconnect and clear seq/flags */
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            /* Transfer pad: complete immediately with TC set */
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            /* Set ATN: no emulation action needed beyond the trace */
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            /* Reset ATN: likewise a no-op here */
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            /* Select without ATN */
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            /* Select with ATN */
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            /* Select with ATN and stop sequence */
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            /* Enable selection/reselection: just clear pending interrupts */
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        /* Write-only setup registers: latched into wregs below */
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Config registers read back what was written */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        /* Invalid register: discard without updating wregs */
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
1161 
1162 static bool esp_mem_accepts(void *opaque, hwaddr addr,
1163                             unsigned size, bool is_write,
1164                             MemTxAttrs attrs)
1165 {
1166     return (size == 1) || (is_write && size == 4);
1167 }
1168 
1169 static bool esp_is_before_version_5(void *opaque, int version_id)
1170 {
1171     ESPState *s = ESP(opaque);
1172 
1173     version_id = MIN(version_id, s->mig_version_id);
1174     return version_id < 5;
1175 }
1176 
1177 static bool esp_is_version_5(void *opaque, int version_id)
1178 {
1179     ESPState *s = ESP(opaque);
1180 
1181     version_id = MIN(version_id, s->mig_version_id);
1182     return version_id >= 5;
1183 }
1184 
1185 static bool esp_is_version_6(void *opaque, int version_id)
1186 {
1187     ESPState *s = ESP(opaque);
1188 
1189     version_id = MIN(version_id, s->mig_version_id);
1190     return version_id >= 6;
1191 }
1192 
1193 int esp_pre_save(void *opaque)
1194 {
1195     ESPState *s = ESP(object_resolve_path_component(
1196                       OBJECT(opaque), "esp"));
1197 
1198     s->mig_version_id = vmstate_esp.version_id;
1199     return 0;
1200 }
1201 
1202 static int esp_post_load(void *opaque, int version_id)
1203 {
1204     ESPState *s = ESP(opaque);
1205     int len, i;
1206 
1207     version_id = MIN(version_id, s->mig_version_id);
1208 
1209     if (version_id < 5) {
1210         esp_set_tc(s, s->mig_dma_left);
1211 
1212         /* Migrate ti_buf to fifo */
1213         len = s->mig_ti_wptr - s->mig_ti_rptr;
1214         for (i = 0; i < len; i++) {
1215             fifo8_push(&s->fifo, s->mig_ti_buf[i]);
1216         }
1217 
1218         /* Migrate cmdbuf to cmdfifo */
1219         for (i = 0; i < s->mig_cmdlen; i++) {
1220             fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
1221         }
1222     }
1223 
1224     s->mig_version_id = vmstate_esp.version_id;
1225     return 0;
1226 }
1227 
1228 /*
1229  * PDMA (or pseudo-DMA) is only used on the Macintosh and requires the
1230  * guest CPU to perform the transfers between the SCSI bus and memory
1231  * itself. This is indicated by the dma_memory_read and dma_memory_write
1232  * functions being NULL (in contrast to the ESP PCI device) whilst
1233  * dma_enabled is still set.
1234  */
1235 
1236 static bool esp_pdma_needed(void *opaque)
1237 {
1238     ESPState *s = ESP(opaque);
1239 
1240     return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
1241            s->dma_enabled;
1242 }
1243 
/* Migration subsection carrying the PDMA callback id (Mac machines only) */
static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = esp_pdma_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(pdma_cb, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1254 
/*
 * Core ESP migration state.  Field order is wire format — do not reorder.
 * The mig_* fields exist only for compatibility with pre-version-5
 * streams and are converted by esp_post_load(); version 5 introduced the
 * Fifo8-based fields, version 6 added the lun.
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 6,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /*
         * Two entries split mig_cmdbuf: the first 16 bytes (since v0) and,
         * from v4 on, the remainder starting at offset 16 — presumably
         * because the buffer grew at version 4; verify against history.
         */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_esp_pdma,
        NULL
    }
};
1294 
1295 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
1296                                  uint64_t val, unsigned int size)
1297 {
1298     SysBusESPState *sysbus = opaque;
1299     ESPState *s = ESP(&sysbus->esp);
1300     uint32_t saddr;
1301 
1302     saddr = addr >> sysbus->it_shift;
1303     esp_reg_write(s, saddr, val);
1304 }
1305 
1306 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
1307                                     unsigned int size)
1308 {
1309     SysBusESPState *sysbus = opaque;
1310     ESPState *s = ESP(&sysbus->esp);
1311     uint32_t saddr;
1312 
1313     saddr = addr >> sysbus->it_shift;
1314     return esp_reg_read(s, saddr);
1315 }
1316 
/* Register window ops; esp_mem_accepts restricts the legal access sizes */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1323 
1324 static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
1325                                   uint64_t val, unsigned int size)
1326 {
1327     SysBusESPState *sysbus = opaque;
1328     ESPState *s = ESP(&sysbus->esp);
1329 
1330     trace_esp_pdma_write(size);
1331 
1332     switch (size) {
1333     case 1:
1334         esp_pdma_write(s, val);
1335         break;
1336     case 2:
1337         esp_pdma_write(s, val >> 8);
1338         esp_pdma_write(s, val);
1339         break;
1340     }
1341     esp_pdma_cb(s);
1342 }
1343 
1344 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
1345                                      unsigned int size)
1346 {
1347     SysBusESPState *sysbus = opaque;
1348     ESPState *s = ESP(&sysbus->esp);
1349     uint64_t val = 0;
1350 
1351     trace_esp_pdma_read(size);
1352 
1353     switch (size) {
1354     case 1:
1355         val = esp_pdma_read(s);
1356         break;
1357     case 2:
1358         val = esp_pdma_read(s);
1359         val = (val << 8) | esp_pdma_read(s);
1360         break;
1361     }
1362     if (fifo8_num_used(&s->fifo) < 2) {
1363         esp_pdma_cb(s);
1364     }
1365     return val;
1366 }
1367 
1368 static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
1369 {
1370     ESPState *s = container_of(req->bus, ESPState, bus);
1371 
1372     scsi_req_ref(req);
1373     s->current_req = req;
1374     return s;
1375 }
1376 
/*
 * PDMA port ops: guests may issue up to 4-byte accesses, but the
 * implementation handles at most 2 bytes (the core splits larger ones).
 */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1386 
/* SCSI bus callbacks wired to the ESP core's transfer state machine */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1397 
1398 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1399 {
1400     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1401     ESPState *s = ESP(&sysbus->esp);
1402 
1403     switch (irq) {
1404     case 0:
1405         parent_esp_reset(s, irq, level);
1406         break;
1407     case 1:
1408         esp_dma_enable(s, irq, level);
1409         break;
1410     }
1411 }
1412 
/*
 * Realize the sysbus wrapper: realize the embedded ESP core, then expose
 * its IRQs, MMIO regions, GPIO lines and SCSI bus.  MMIO region order
 * (registers first, then PDMA) is part of the board-facing interface.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    /* Realize the child core first; propagate any error to the caller */
    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    /* The board must have set it_shift before realize */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    /* MMIO region 0: registers, spaced 1 << it_shift bytes apart */
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    /* MMIO region 1: the 4-byte PDMA data port */
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    /* Two GPIO inputs: reset and DMA enable (see sysbus_esp_gpio_demux) */
    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}
1439 
1440 static void sysbus_esp_hard_reset(DeviceState *dev)
1441 {
1442     SysBusESPState *sysbus = SYSBUS_ESP(dev);
1443     ESPState *s = ESP(&sysbus->esp);
1444 
1445     esp_hard_reset(s);
1446 }
1447 
1448 static void sysbus_esp_init(Object *obj)
1449 {
1450     SysBusESPState *sysbus = SYSBUS_ESP(obj);
1451 
1452     object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
1453 }
1454 
/*
 * Wrapper device migration state: sends the vmstate version (added in
 * v2) ahead of the embedded ESP core state so version tests work.
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1466 
1467 static void sysbus_esp_class_init(ObjectClass *klass, void *data)
1468 {
1469     DeviceClass *dc = DEVICE_CLASS(klass);
1470 
1471     dc->realize = sysbus_esp_realize;
1472     dc->reset = sysbus_esp_hard_reset;
1473     dc->vmsd = &vmstate_sysbus_esp_scsi;
1474     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1475 }
1476 
/* QOM type registration record for the sysbus-attached ESP */
static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_SYSBUS_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};
1484 
1485 static void esp_finalize(Object *obj)
1486 {
1487     ESPState *s = ESP(obj);
1488 
1489     fifo8_destroy(&s->fifo);
1490     fifo8_destroy(&s->cmdfifo);
1491 }
1492 
1493 static void esp_init(Object *obj)
1494 {
1495     ESPState *s = ESP(obj);
1496 
1497     fifo8_create(&s->fifo, ESP_FIFO_SZ);
1498     fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
1499 }
1500 
1501 static void esp_class_init(ObjectClass *klass, void *data)
1502 {
1503     DeviceClass *dc = DEVICE_CLASS(klass);
1504 
1505     /* internal device for sysbusesp/pciespscsi, not user-creatable */
1506     dc->user_creatable = false;
1507     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1508 }
1509 
/* QOM type registration record for the embedded ESP core */
static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};
1518 
1519 static void esp_register_types(void)
1520 {
1521     type_register_static(&sysbus_esp_info);
1522     type_register_static(&esp_info);
1523 }
1524 
1525 type_init(esp_register_types)
1526