/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 */

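/*
 * Interrupt line handling: STAT_INT in the status register mirrors the state
 * of the IRQ line, so the line is only toggled on an actual 0 <-> 1 change.
 */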
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

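/*
 * Called by the DMA controller (e.g. via GPIO line 1 of the sysbus device)
 * to gate DMA transfers.  A command that was started while DMA was disabled
 * is parked in dma_cb and replayed as soon as DMA is enabled again.
 */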
void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

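/*
 * SCSIBusInfo .cancel callback: drop our reference to the request being
 * cancelled if it is the one currently in flight.
 */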
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

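/*
 * Fetch a CDB from the guest, either through the DMA callbacks (transfer
 * length taken from the TC registers) or from the FIFO buffer, and look up
 * the selected target.  Returns the number of command bytes read, or 0 if
 * the transfer does not fit in the buffer or the target does not exist.
 */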
static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = s->rregs[ESP_TCLO];
        dmalen |= s->rregs[ESP_TCMID] << 8;
        dmalen |= s->rregs[ESP_TCHI] << 16;
        if (dmalen > buflen) {
            return 0;
        }
        s->dma_memory_read(s->dma_opaque, buf, dmalen);
    } else {
        dmalen = s->ti_size;
        if (dmalen > TI_BUFSZ) {
            return 0;
        }
        memcpy(buf, s->ti_buf, dmalen);
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished.  Cancel it.  */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return 0;
    }
    return dmalen;
}

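/*
 * Hand a fully assembled CDB to the SCSI layer.  The sign of the value
 * returned by scsi_req_enqueue() selects the next phase: positive means
 * data-in (STAT_DI), negative means data-out (STAT_DO), zero means no data
 * phase at all.
 */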
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void do_cmd(ESPState *s, uint8_t *buf)
{
    uint8_t busid = buf[0];

    do_busid_cmd(s, &buf[1], busid);
}

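/*
 * Selection handlers.  handle_satn() (select with ATN) and
 * handle_s_without_atn() fetch the CDB and pass it straight to the SCSI
 * layer.  If DMA is requested but currently disabled, the handler re-arms
 * itself via dma_cb and is replayed from esp_dma_enable().
 */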
static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_cmd(s, buf);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_busid_cmd(s, buf, 0);
    }
}

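/*
 * Select with ATN and stop: the CDB is latched into cmdbuf and do_cmd is
 * set; the actual SCSI command is only issued once the guest follows up
 * with a Transfer Information command (see handle_ti() below).
 */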
static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

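/*
 * Status phase: produce the two-byte status/message sequence requested by
 * the CMD_ICCS (initiator command complete) command, either through the
 * DMA callbacks or by leaving it in the FIFO for PIO reads.
 */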
static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;
    if (s->dma) {
        s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

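/*
 * Signal completion of a DMA transfer: set the terminal count status bit,
 * clear the transfer counter registers and raise an interrupt.
 */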
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    s->rregs[ESP_TCHI] = 0;
    esp_raise_irq(s);
}

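/*
 * Move as much data as possible between the DMA engine and the current
 * SCSI request buffer.  A negative ti_size means the transfer is to the
 * device (data-out); a positive ti_size means data-in.  While the command
 * buffer is still being filled (do_cmd), DMA data is appended to cmdbuf
 * instead.
 */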
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    len = s->dma_left;
    if (s->do_cmd) {
        trace_esp_do_dma(s->cmdlen, len);
        assert(s->cmdlen <= sizeof(s->cmdbuf) &&
               len <= sizeof(s->cmdbuf) - s->cmdlen);
        s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    to_device = (s->ti_size < 0);
    if (to_device) {
        s->dma_memory_read(s->dma_opaque, s->async_buf, len);
    } else {
        s->dma_memory_write(s->dma_opaque, s->async_buf, len);
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then complete
         * the DMA operation immediately.  Otherwise defer until the scsi
         * layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
}

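/*
 * Finish the current request: record the SCSI status byte, move to the
 * status phase and release the request.
 */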
static void esp_report_command_complete(ESPState *s, uint32_t status)
{
    trace_esp_command_complete();
    if (s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->dma_left = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

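/*
 * SCSIBusInfo .complete callback.  If the guest has not yet acknowledged
 * the previous interrupt, the completion is deferred and reported when
 * ESP_RINTR is read.
 */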
void esp_command_complete(SCSIRequest *req, uint32_t status,
                          size_t resid)
{
    ESPState *s = req->hba_private;

    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        /* Defer handling command complete until the previous
         * interrupt has been handled.
         */
        trace_esp_command_complete_deferred();
        s->deferred_status = status;
        s->deferred_complete = true;
        return;
    }
    esp_report_command_complete(s, status);
}

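/*
 * SCSIBusInfo .transfer_data callback: the SCSI layer has a new data
 * buffer ready.  Resume a pending DMA transfer, or, if this was the last
 * part of a DMA transfer, raise the completion interrupt that was deferred
 * until now.
 */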
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;

    assert(!s->do_cmd);
    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /*
         * If this was the last part of a DMA transfer then the
         * completion interrupt is deferred to here.
         */
        esp_dma_done(s);
    }
}

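/*
 * Transfer Information (CMD_TI): start the data phase that was set up by
 * the preceding selection.  The transfer length comes from the TC
 * registers; a programmed count of zero means the maximum of 0x10000
 * bytes.  A typical guest sequence for a DMA data phase is (sketch):
 *
 *     write ESP_TCLO / ESP_TCMID (/ ESP_TCHI) with the byte count
 *     write ESP_CMD = CMD_TI | CMD_DMA
 *     wait for the interrupt, then read ESP_RINTR to acknowledge it
 *
 * If a CDB was latched by a "select with ATN and stop" sequence, CMD_TI
 * delivers that command instead of transferring data.
 */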
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    if (s->do_cmd) {
        minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ;
    } else if (s->ti_size < 0) {
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    } else {
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    }
    trace_esp_handle_ti(minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    }
    if (s->do_cmd) {
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    esp_hard_reset(s);
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

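/*
 * Register reads.  Reading ESP_RINTR acknowledges the current interrupt:
 * the interrupt register is cleared, the sequence step reset, the TC and
 * INT status bits dropped and the IRQ line lowered, after which any
 * deferred command completion is delivered.  Reading ESP_TCHI before it
 * has ever been written returns the chip ID instead.
 */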
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t old_val;

    trace_esp_mem_readb(saddr, s->rregs[saddr]);
    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out.  */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else if (s->ti_rptr < s->ti_wptr) {
            s->ti_size--;
            s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
        }
        if (s->ti_rptr == s->ti_wptr) {
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case ESP_RINTR:
        /* Clear sequence step, interrupt register and all status bits
           except TC */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);
        if (s->deferred_complete) {
            esp_report_command_complete(s, s->deferred_status);
            s->deferred_complete = false;
        }
        return old_val;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            return s->chip_id;
        }
    default:
        break;
    }
    return s->rregs[saddr];
}

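/*
 * Register writes.  Writing the transfer count registers clears the TC
 * status bit; FIFO writes extend either the command buffer (while do_cmd
 * is set) or the PIO transfer buffer; writes to ESP_CMD decode and execute
 * a chip command, reloading the transfer counter first when the DMA bit is
 * set.
 */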
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            if (s->cmdlen < ESP_CMDBUF_SZ) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            } else {
                trace_esp_error_fifo_overrun();
            }
        } else if (s->ti_wptr == TI_BUFSZ - 1) {
            trace_esp_error_fifo_overrun();
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.  */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
            s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            /* s->ti_size = 0; */
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

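/*
 * Migration state for the ESP core device state (registers, transfer
 * buffer and in-flight command/DMA bookkeeping).
 */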
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 4,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(deferred_status, ESPState),
        VMSTATE_BOOL(deferred_complete, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
        VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(&sysbus->esp, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(&sysbus->esp, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

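/*
 * Inbound GPIO lines of the sysbus device: line 0 resets the chip, line 1
 * is driven by the DMA controller to enable or disable DMA.
 */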
static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = ESP_STATE(opaque);
    ESPState *s = &sysbus->esp;

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(opaque, irq, level);
        break;
    }
}

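/*
 * Realize the sysbus wrapper: set up the IRQ, the MMIO region (register
 * stride given by it_shift, which the board must set), the two GPIO inputs
 * and the SCSI bus.
 */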
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = ESP_STATE(dev);
    ESPState *s = &sysbus->esp;

    sysbus_init_irq(sbd, &s->irq);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = ESP_STATE(dev);
    esp_hard_reset(&sysbus->esp);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
}

type_init(esp_register_types)