xref: /openbmc/qemu/hw/scsi/esp.c (revision 1be82d89)
/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 */

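/*
 * The STAT_INT bit in the status register mirrors the state of the IRQ line,
 * so the helpers below only drive the line on an actual edge.
 */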
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

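/*
 * Fetch the CDB for the currently selected target, either through the DMA
 * callbacks (length taken from the 24-bit transfer counter) or from the PIO
 * FIFO buffer.  Returns the number of command bytes copied into buf, or 0 if
 * the length is out of range or no device answers selection.
 */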
static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = s->rregs[ESP_TCLO];
        dmalen |= s->rregs[ESP_TCMID] << 8;
        dmalen |= s->rregs[ESP_TCHI] << 16;
        if (dmalen > buflen) {
            return 0;
        }
        s->dma_memory_read(s->dma_opaque, buf, dmalen);
    } else {
        dmalen = s->ti_size;
        if (dmalen > TI_BUFSZ) {
            return 0;
        }
        memcpy(buf, s->ti_buf, dmalen);
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished.  Cancel it.  */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return 0;
    }
    return dmalen;
}

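/*
 * Hand the CDB to the SCSI layer.  A positive return from scsi_req_enqueue()
 * means the target has data for the initiator (Data In phase), a negative
 * value means the initiator must send data (Data Out phase).
 */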
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void do_cmd(ESPState *s, uint8_t *buf)
{
    uint8_t busid = buf[0];

    do_busid_cmd(s, &buf[1], busid);
}

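/*
 * Selection commands.  When the core is in DMA mode but the external DMA
 * engine is not yet enabled, the handler is recorded in dma_cb and re-run
 * from esp_dma_enable() once the DMA engine comes up.
 */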
static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_cmd(s, buf);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_busid_cmd(s, buf, 0);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

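/*
 * Send the two-byte status/message response for the Initiator Command
 * Complete Sequence, either through DMA or by leaving it in the FIFO for
 * the guest to read out.
 */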
static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;
    if (s->dma) {
        s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    s->rregs[ESP_TCHI] = 0;
    esp_raise_irq(s);
}

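/*
 * Move the next chunk of data between the external DMA engine and the SCSI
 * layer's buffer.  The sign of ti_size encodes the direction: negative means
 * the transfer goes to the device (write), positive means data comes from
 * the device (read).  dma_left tracks how much of the programmed transfer
 * counter is still outstanding.
 */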
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    len = s->dma_left;
    if (s->do_cmd) {
        trace_esp_do_dma(s->cmdlen, len);
        assert(s->cmdlen <= sizeof(s->cmdbuf) &&
               len <= sizeof(s->cmdbuf) - s->cmdlen);
        s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    to_device = (s->ti_size < 0);
    if (to_device) {
        s->dma_memory_read(s->dma_opaque, s->async_buf, len);
    } else {
        s->dma_memory_write(s->dma_opaque, s->async_buf, len);
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
}

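/*
 * Record the SCSI status byte, move to the Status phase and raise the
 * completion interrupt, then drop the reference to the finished request.
 */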
static void esp_report_command_complete(ESPState *s, uint32_t status)
{
    trace_esp_command_complete();
    if (s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->dma_left = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

void esp_command_complete(SCSIRequest *req, uint32_t status,
                          size_t resid)
{
    ESPState *s = req->hba_private;

    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        /*
         * Defer handling command complete until the previous
         * interrupt has been handled.
         */
        trace_esp_command_complete_deferred();
        s->deferred_status = status;
        s->deferred_complete = true;
        return;
    }
    esp_report_command_complete(s, status);
}

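/*
 * Callback from the SCSI layer: a new chunk of the request's data buffer is
 * ready.  Resume any DMA transfer that was waiting for it, or deliver the
 * deferred completion interrupt for the final chunk.
 */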
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;

    assert(!s->do_cmd);
    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /*
         * If this was the last part of a DMA transfer then the
         * completion interrupt is deferred to here.
         */
        esp_dma_done(s);
    }
}

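/*
 * Transfer Information command.  The transfer length is taken from the
 * 24-bit counter split across TCLO/TCMID/TCHI; for example a count of
 * 0x012345 is programmed as TCLO=0x45, TCMID=0x23, TCHI=0x01.  A counter
 * value of zero is treated as the maximum transfer of 0x10000 bytes.
 */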
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    if (s->do_cmd) {
        minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ;
    } else if (s->ti_size < 0) {
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    } else {
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    }
    trace_esp_handle_ti(minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    }
    if (s->do_cmd) {
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    esp_hard_reset(s);
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

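/*
 * Register reads can have side effects: reading RINTR acknowledges the
 * pending interrupt and releases any command completion that was deferred
 * while the interrupt was outstanding, and reading TCHI before it has ever
 * been written returns the chip identifier instead.
 */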
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t old_val;

    trace_esp_mem_readb(saddr, s->rregs[saddr]);
    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out.  */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else if (s->ti_rptr < s->ti_wptr) {
            s->ti_size--;
            s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
        }
        if (s->ti_rptr == s->ti_wptr) {
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);
        if (s->deferred_complete) {
            esp_report_command_complete(s, s->deferred_status);
            s->deferred_complete = false;
        }
        return old_val;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            return s->chip_id;
        }
    default:
        break;
    }
    return s->rregs[saddr];
}

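/*
 * Register writes.  Writing the command register latches the DMA bit and,
 * for DMA commands, reloads the live transfer counter from the last values
 * written to TCLO/TCMID/TCHI before the command itself is dispatched.
 */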
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            if (s->cmdlen < ESP_CMDBUF_SZ) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            } else {
                trace_esp_error_fifo_overrun();
            }
        } else if (s->ti_wptr == TI_BUFSZ - 1) {
            trace_esp_error_fifo_overrun();
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.  */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
            s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            /* s->ti_size = 0; */
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

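/*
 * Migration state for the ESP core: the guest-visible registers plus the
 * FIFO, command buffer and transfer bookkeeping needed to resume an
 * in-flight transfer on the destination.
 */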
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 4,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(deferred_status, ESPState),
        VMSTATE_BOOL(deferred_complete, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
        VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(&sysbus->esp, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(&sysbus->esp, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

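/*
 * Incoming GPIO lines from the board: line 0 resets the chip, line 1 gates
 * the external DMA engine on and off.
 */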
static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = ESP_STATE(opaque);
    ESPState *s = &sysbus->esp;

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = ESP_STATE(dev);
    ESPState *s = &sysbus->esp;

    sysbus_init_irq(sbd, &s->irq);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = ESP_STATE(dev);
    esp_hard_reset(&sysbus->esp);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
}

type_init(esp_register_types)