/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 */

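/*
 * The STAT_INT bit in the status register mirrors the state of the IRQ line,
 * so the helpers below only touch the qemu_irq when the bit actually changes.
 */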
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

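/*
 * Called (via a gpio line) by the board's DMA controller to gate DMA
 * transfers.  If a command handler is waiting for DMA to become available,
 * its deferred callback (dma_cb) runs as soon as DMA is enabled.
 */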
void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

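/*
 * Fetch a CDB from the initiator, either via DMA (length taken from the
 * transfer counter registers) or from the FIFO buffer, and look up the
 * selected target.  Returns the command length, or 0 if the length is out
 * of range or the target does not exist (in which case a disconnect
 * interrupt is raised).
 */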
static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = s->rregs[ESP_TCLO];
        dmalen |= s->rregs[ESP_TCMID] << 8;
        dmalen |= s->rregs[ESP_TCHI] << 16;
        if (dmalen > buflen) {
            return 0;
        }
        s->dma_memory_read(s->dma_opaque, buf, dmalen);
    } else {
        dmalen = s->ti_size;
        if (dmalen > TI_BUFSZ) {
            return 0;
        }
        memcpy(buf, s->ti_buf, dmalen);
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished.  Cancel it.  */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return 0;
    }
    return dmalen;
}

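/*
 * Execute a CDB on the currently selected target: create and enqueue the
 * SCSI request, record the expected transfer direction in the status
 * register and signal "function complete" to the guest.
 */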
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void do_cmd(ESPState *s, uint8_t *buf)
{
    uint8_t busid = buf[0];

    do_busid_cmd(s, &buf[1], busid);
}

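/*
 * Select with ATN: fetch the message byte plus CDB and execute it
 * immediately; the leading byte selects the LUN.
 */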
static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_cmd(s, buf);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_busid_cmd(s, buf, 0);
    }
}

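/*
 * Select with ATN and stop: only latch the CDB into cmdbuf and set do_cmd;
 * the command itself is executed later by a Transfer Information command.
 */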
static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

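/*
 * Status phase: return the completion status byte followed by a zero
 * message byte, either through DMA or via the FIFO.
 */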
static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;
    if (s->dma) {
        s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

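/* Signal the end of a DMA transfer: terminal count reached, counter cleared. */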
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    s->rregs[ESP_TCHI] = 0;
    esp_raise_irq(s);
}

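/*
 * Move up to dma_left bytes between guest memory and the current request's
 * buffer.  The sign of ti_size gives the direction: negative means a write
 * to the device.  When the async buffer runs out the SCSI layer is asked to
 * continue; completion is signalled either here (partially filled SCSI
 * buffer) or later from esp_transfer_data().
 */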
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    len = s->dma_left;
    if (s->do_cmd) {
        trace_esp_do_dma(s->cmdlen, len);
        assert(s->cmdlen <= sizeof(s->cmdbuf) &&
               len <= sizeof(s->cmdbuf) - s->cmdlen);
        s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    to_device = (s->ti_size < 0);
    if (to_device) {
        s->dma_memory_read(s->dma_opaque, s->async_buf, len);
    } else {
        s->dma_memory_write(s->dma_opaque, s->async_buf, len);
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /* If there is still data to be read from the device then
           complete the DMA operation immediately.  Otherwise defer
           until the scsi layer has completed.  */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
}

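/*
 * Finish the current request: latch the status byte, switch to the status
 * phase, raise the completion interrupt and drop the request reference.
 */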
static void esp_report_command_complete(ESPState *s, uint32_t status)
{
    trace_esp_command_complete();
    if (s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->dma_left = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

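/*
 * SCSI layer completion callback.  If the guest has not yet acknowledged
 * the previous interrupt, the completion is recorded and replayed from the
 * RINTR read handler instead.
 */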
void esp_command_complete(SCSIRequest *req, uint32_t status,
                          size_t resid)
{
    ESPState *s = req->hba_private;

    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        /* Defer handling command complete until the previous
         * interrupt has been handled.
         */
        trace_esp_command_complete_deferred();
        s->deferred_status = status;
        s->deferred_complete = true;
        return;
    }
    esp_report_command_complete(s, status);
}

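/*
 * SCSI layer data callback: another chunk of the request buffer is ready.
 * Resume any DMA in progress, or raise the deferred completion interrupt
 * if the DMA transfer has already drained.
 */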
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;

    assert(!s->do_cmd);
    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /* If this was the last part of a DMA transfer then the
           completion interrupt is deferred to here.  */
        esp_dma_done(s);
    }
}

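/*
 * Transfer Information command: compute the transfer length from the
 * counter registers (0 means the maximum of 0x10000) and either start the
 * DMA or execute a CDB previously latched by "select with ATN and stop".
 */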
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    if (s->do_cmd) {
        minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ;
    } else if (s->ti_size < 0) {
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    } else {
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    }
    trace_esp_handle_ti(minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    }
    if (s->do_cmd) {
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
    }
}

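/*
 * Chip reset: clear all registers and transfer state.  CFG1 comes back up
 * with the default bus ID of 7.
 */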
void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    esp_hard_reset(s);
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

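/*
 * Register reads.  FIFO reads pop a byte from ti_buf; reading RINTR clears
 * the interrupt and sequence state and may replay a deferred command
 * completion; TCHI returns the chip ID until it has been written.
 */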
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t old_val;

    trace_esp_mem_readb(saddr, s->rregs[saddr]);
    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out.  */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else if (s->ti_rptr < s->ti_wptr) {
            s->ti_size--;
            s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
        }
        if (s->ti_rptr == s->ti_wptr) {
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case ESP_RINTR:
        /* Clear sequence step, interrupt register and all status bits
           except TC */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);
        if (s->deferred_complete) {
            esp_report_command_complete(s, s->deferred_status);
            s->deferred_complete = false;
        }
        return old_val;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            return s->chip_id;
        }
    default:
        break;
    }
    return s->rregs[saddr];
}

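/*
 * Register writes.  FIFO writes feed either the command buffer or ti_buf;
 * a write to the command register latches the DMA flag, reloads the
 * transfer counter and dispatches the individual ESP commands.
 */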
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            if (s->cmdlen < ESP_CMDBUF_SZ) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            } else {
                trace_esp_error_fifo_overrun();
            }
        } else if (s->ti_wptr == TI_BUFSZ - 1) {
            trace_esp_error_fifo_overrun();
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.  */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
            s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            /* s->ti_size = 0; */
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

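/* The registers are byte-wide; 32-bit accesses are only accepted for writes. */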
static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

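/*
 * Migration state.  Only the first 16 bytes of cmdbuf are migrated in
 * version 3 streams; the remainder of the buffer is included from
 * version 4 onwards.
 */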
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 4,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(deferred_status, ESPState),
        VMSTATE_BOOL(deferred_complete, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
        VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

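/*
 * Sysbus MMIO wrappers: it_shift spaces out the byte-wide ESP registers in
 * the board's address map.
 */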
static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(&sysbus->esp, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(&sysbus->esp, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

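/* gpio 0 resets the chip, gpio 1 gates DMA (driven by the DMA controller). */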
static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = ESP_STATE(opaque);
    ESPState *s = &sysbus->esp;

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

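/*
 * Realize: export the interrupt, map ESP_REGS byte-wide registers spaced by
 * it_shift, register the two gpio inputs and create the SCSI bus.
 */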
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = ESP_STATE(dev);
    ESPState *s = &sysbus->esp;

    sysbus_init_irq(sbd, &s->irq);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = ESP_STATE(dev);
    esp_hard_reset(&sysbus->esp);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
}

type_init(esp_register_types)