xref: /openbmc/qemu/hw/scsi/esp-pci.c (revision 97ef5f88)
/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/irq.h"
#include "hw/nvram/eeprom93xx.h"
#include "hw/scsi/esp.h"
#include "migration/vmstate.h"
#include "trace.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qom/object.h"

#define TYPE_AM53C974_DEVICE "am53c974"

typedef struct PCIESPState PCIESPState;
DECLARE_INSTANCE_CHECKER(PCIESPState, PCI_ESP,
                         TYPE_AM53C974_DEVICE)

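/*
 * DMA engine register indices. Each is a 32-bit register exposed in BAR0 at
 * offset 0x40 + 4 * index. STC/SPA/SMDLA hold the values programmed by the
 * guest; WBC/WAC/WMAC are the working copies latched from them on a START
 * command and updated as the transfer progresses.
 */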
#define DMA_CMD   0x0
#define DMA_STC   0x1
#define DMA_SPA   0x2
#define DMA_WBC   0x3
#define DMA_WAC   0x4
#define DMA_STAT  0x5
#define DMA_SMDLA 0x6
#define DMA_WMAC  0x7

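/*
 * DMA_CMD bit fields: the low two bits select the command (IDLE, BLAST,
 * ABORT or START); the remaining bits enable diagnostic mode, MDL (memory
 * descriptor list) transfers and two interrupt sources, and select the
 * transfer direction (DMA_CMD_DIR).
 */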
#define DMA_CMD_MASK   0x03
#define DMA_CMD_DIAG   0x04
#define DMA_CMD_MDL    0x10
#define DMA_CMD_INTE_P 0x20
#define DMA_CMD_INTE_D 0x40
#define DMA_CMD_DIR    0x80

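/*
 * DMA_STAT flags. SCSIINT is not stored but synthesised on read from the ESP
 * core's interrupt status; ERROR/ABORT/DONE are sticky and cleared according
 * to the SBAC_STATUS setting below.
 */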
#define DMA_STAT_PWDN    0x01
#define DMA_STAT_ERROR   0x02
#define DMA_STAT_ABORT   0x04
#define DMA_STAT_DONE    0x08
#define DMA_STAT_SCSIINT 0x10
#define DMA_STAT_BCMBLT  0x20

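/*
 * Bit 24 of the SBAC (SCSI Bus and Control) register selects how the sticky
 * DMA_STAT bits are cleared: when set, ERROR/ABORT/DONE are cleared by
 * writing a 1 to them; when clear, they are cleared automatically on read.
 */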
#define SBAC_STATUS (1 << 24)

struct PCIESPState {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    MemoryRegion io;
    uint32_t dma_regs[8];
    uint32_t sbac;
    ESPState esp;
};

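/*
 * Handlers for the four DMA_CMD command codes, dispatched from
 * esp_pci_dma_write() below.
 */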
static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val)
{
    ESPState *s = ESP(&pci->esp);

    trace_esp_pci_dma_idle(val);
    esp_dma_enable(s, 0, 0);
}

static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val)
{
    trace_esp_pci_dma_blast(val);
    qemu_log_mask(LOG_UNIMP, "am53c974: cmd BLAST not implemented\n");
}

static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val)
{
    ESPState *s = ESP(&pci->esp);

    trace_esp_pci_dma_abort(val);
    if (s->current_req) {
        scsi_req_cancel(s->current_req);
    }
}

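/*
 * START latches the programmed starting count/address/MDL address into the
 * working registers, clears the previous transfer status and enables DMA in
 * the ESP core.
 */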
static void esp_pci_handle_start(PCIESPState *pci, uint32_t val)
{
    ESPState *s = ESP(&pci->esp);

    trace_esp_pci_dma_start(val);

    pci->dma_regs[DMA_WBC] = pci->dma_regs[DMA_STC];
    pci->dma_regs[DMA_WAC] = pci->dma_regs[DMA_SPA];
    pci->dma_regs[DMA_WMAC] = pci->dma_regs[DMA_SMDLA];

    pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT
                               | DMA_STAT_DONE | DMA_STAT_ABORT
                               | DMA_STAT_ERROR | DMA_STAT_PWDN);

    esp_dma_enable(s, 0, 1);
}

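/*
 * Guest write to a DMA engine register. Writes to DMA_CMD execute the command
 * encoded in the low two bits; DMA_STAT is only writable (write-1-to-clear)
 * when SBAC_STATUS is set; writes to the working registers are ignored and
 * traced as invalid.
 */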
static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
{
    trace_esp_pci_dma_write(saddr, pci->dma_regs[saddr], val);
    switch (saddr) {
    case DMA_CMD:
        pci->dma_regs[saddr] = val;
        switch (val & DMA_CMD_MASK) {
        case 0x0: /* IDLE */
            esp_pci_handle_idle(pci, val);
            break;
        case 0x1: /* BLAST */
            esp_pci_handle_blast(pci, val);
            break;
        case 0x2: /* ABORT */
            esp_pci_handle_abort(pci, val);
            break;
        case 0x3: /* START */
            esp_pci_handle_start(pci, val);
            break;
        default: /* can't happen */
            abort();
        }
        break;
    case DMA_STC:
    case DMA_SPA:
    case DMA_SMDLA:
        pci->dma_regs[saddr] = val;
        break;
    case DMA_STAT:
        if (pci->sbac & SBAC_STATUS) {
            /* clear some bits on write */
            uint32_t mask = DMA_STAT_ERROR | DMA_STAT_ABORT | DMA_STAT_DONE;
            pci->dma_regs[DMA_STAT] &= ~(val & mask);
        }
        break;
    default:
        trace_esp_pci_error_invalid_write_dma(val, saddr);
        return;
    }
}

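/*
 * Guest read of a DMA engine register. Reading DMA_STAT has side effects:
 * SCSIINT mirrors the ESP core interrupt status, and unless SBAC_STATUS is
 * set the sticky ERROR/ABORT/DONE bits are cleared by the read itself.
 */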
static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr)
{
    ESPState *s = ESP(&pci->esp);
    uint32_t val;

    val = pci->dma_regs[saddr];
    if (saddr == DMA_STAT) {
        if (s->rregs[ESP_RSTAT] & STAT_INT) {
            val |= DMA_STAT_SCSIINT;
        }
        if (!(pci->sbac & SBAC_STATUS)) {
            pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_ERROR | DMA_STAT_ABORT |
                                         DMA_STAT_DONE);
        }
    }

    trace_esp_pci_dma_read(saddr, val);
    return val;
}

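/*
 * BAR0 layout: 0x00-0x3f holds the ESP/SCSI core registers (one per 32-bit
 * slot), 0x40-0x5f the DMA engine registers and 0x70 the SBAC register.
 * Sub-word or unaligned accesses are widened into a read-modify-write of the
 * containing 32-bit register before being dispatched.
 */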
static void esp_pci_io_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned int size)
{
    PCIESPState *pci = opaque;
    ESPState *s = ESP(&pci->esp);

    if (size < 4 || addr & 3) {
        /* need to upgrade the request: we only support 4-byte accesses */
        uint32_t current = 0, mask;
        int shift;

        if (addr < 0x40) {
            current = s->wregs[addr >> 2];
        } else if (addr < 0x60) {
            current = pci->dma_regs[(addr - 0x40) >> 2];
        } else if (addr < 0x74) {
            current = pci->sbac;
        }

        shift = (4 - size) * 8;
        mask = (~(uint32_t)0 << shift) >> shift;

        shift = (addr & 3) * 8;
        val <<= shift;
        val |= current & ~(mask << shift);
        addr &= ~3;
        size = 4;
    }
    g_assert(size >= 4);

    if (addr < 0x40) {
        /* SCSI core reg */
        esp_reg_write(s, addr >> 2, val);
    } else if (addr < 0x60) {
        /* PCI DMA CCB */
        esp_pci_dma_write(pci, (addr - 0x40) >> 2, val);
    } else if (addr == 0x70) {
        /* DMA SCSI Bus and control */
        trace_esp_pci_sbac_write(pci->sbac, val);
        pci->sbac = val;
    } else {
        trace_esp_pci_error_invalid_write((int)addr);
    }
}

static uint64_t esp_pci_io_read(void *opaque, hwaddr addr,
                                unsigned int size)
{
    PCIESPState *pci = opaque;
    ESPState *s = ESP(&pci->esp);
    uint32_t ret;

    if (addr < 0x40) {
        /* SCSI core reg */
        ret = esp_reg_read(s, addr >> 2);
    } else if (addr < 0x60) {
        /* PCI DMA CCB */
        ret = esp_pci_dma_read(pci, (addr - 0x40) >> 2);
    } else if (addr == 0x70) {
        /* DMA SCSI Bus and control */
        trace_esp_pci_sbac_read(pci->sbac);
        ret = pci->sbac;
    } else {
        /* Invalid region */
        trace_esp_pci_error_invalid_read((int)addr);
        ret = 0;
    }

    /* give only requested data */
    ret >>= (addr & 3) * 8;
    ret &= ~(~(uint64_t)0 << (8 * size));

    return ret;
}

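/*
 * DMA callback from the ESP core. The direction requested by the core must
 * match the direction programmed in DMA_CMD; the length is clamped to the
 * remaining byte count and the working byte/address counters are updated,
 * with DONE raised once the byte count reaches zero.
 */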
static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len,
                                  DMADirection dir)
{
    dma_addr_t addr;
    DMADirection expected_dir;

    if (pci->dma_regs[DMA_CMD] & DMA_CMD_DIR) {
        expected_dir = DMA_DIRECTION_FROM_DEVICE;
    } else {
        expected_dir = DMA_DIRECTION_TO_DEVICE;
    }

    if (dir != expected_dir) {
        trace_esp_pci_error_invalid_dma_direction();
        return;
    }

    if (pci->dma_regs[DMA_CMD] & DMA_CMD_MDL) {
        qemu_log_mask(LOG_UNIMP, "am53c974: MDL transfer not implemented\n");
    }

    addr = pci->dma_regs[DMA_WAC];
    if (pci->dma_regs[DMA_WBC] < len) {
        len = pci->dma_regs[DMA_WBC];
    }

    pci_dma_rw(PCI_DEVICE(pci), addr, buf, len, dir);

    /* update status registers */
    pci->dma_regs[DMA_WBC] -= len;
    pci->dma_regs[DMA_WAC] += len;
    if (pci->dma_regs[DMA_WBC] == 0) {
        pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
    }
}

static void esp_pci_dma_memory_read(void *opaque, uint8_t *buf, int len)
{
    PCIESPState *pci = opaque;
    esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static void esp_pci_dma_memory_write(void *opaque, uint8_t *buf, int len)
{
    PCIESPState *pci = opaque;
    esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_FROM_DEVICE);
}

static const MemoryRegionOps esp_pci_io_ops = {
    .read = esp_pci_io_read,
    .write = esp_pci_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

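/*
 * Device-level hard reset: reset the ESP core and restore the DMA engine
 * registers to their reset values.
 */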
static void esp_pci_hard_reset(DeviceState *dev)
{
    PCIESPState *pci = PCI_ESP(dev);
    ESPState *s = ESP(&pci->esp);

    esp_hard_reset(s);
    pci->dma_regs[DMA_CMD] &= ~(DMA_CMD_DIR | DMA_CMD_INTE_D | DMA_CMD_INTE_P
                              | DMA_CMD_MDL | DMA_CMD_DIAG | DMA_CMD_MASK);
    pci->dma_regs[DMA_WBC] &= ~0xffff;
    pci->dma_regs[DMA_WAC] = 0xffffffff;
    pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT
                               | DMA_STAT_DONE | DMA_STAT_ABORT
                               | DMA_STAT_ERROR);
    pci->dma_regs[DMA_WMAC] = 0xfffffffd;
}

static const VMStateDescription vmstate_esp_pci_scsi = {
    .name = "pciespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PCIESPState),
        VMSTATE_BUFFER_UNSAFE(dma_regs, PCIESPState, 0, 8 * sizeof(uint32_t)),
        VMSTATE_UINT8_V(esp.mig_version_id, PCIESPState, 2),
        VMSTATE_STRUCT(esp, PCIESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

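/*
 * Completion callback from the SCSI layer: forward to the ESP core and mark
 * the DMA transfer as finished.
 */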
static void esp_pci_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    PCIESPState *pci = container_of(s, PCIESPState, esp);

    esp_command_complete(req, resid);
    pci->dma_regs[DMA_WBC] = 0;
    pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
}

static const struct SCSIBusInfo esp_pci_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_pci_command_complete,
    .cancel = esp_request_cancelled,
};

static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp)
{
    PCIESPState *pci = PCI_ESP(dev);
    DeviceState *d = DEVICE(dev);
    ESPState *s = ESP(&pci->esp);
    uint8_t *pci_conf;

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    pci_conf = dev->config;

    /* Interrupt pin A */
    pci_conf[PCI_INTERRUPT_PIN] = 0x01;

    s->dma_memory_read = esp_pci_dma_memory_read;
    s->dma_memory_write = esp_pci_dma_memory_write;
    s->dma_opaque = pci;
    s->chip_id = TCHI_AM53C974;
    memory_region_init_io(&pci->io, OBJECT(pci), &esp_pci_io_ops, pci,
                          "esp-io", 0x80);

    pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io);
    s->irq = pci_allocate_irq(dev);

    scsi_bus_new(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info, NULL);
}

static void esp_pci_scsi_exit(PCIDevice *d)
{
    PCIESPState *pci = PCI_ESP(d);
    ESPState *s = ESP(&pci->esp);

    qemu_free_irq(s->irq);
}

static void esp_pci_init(Object *obj)
{
    PCIESPState *pci = PCI_ESP(obj);

    object_initialize_child(obj, "esp", &pci->esp, TYPE_ESP);
}

static void esp_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = esp_pci_scsi_realize;
    k->exit = esp_pci_scsi_exit;
    k->vendor_id = PCI_VENDOR_ID_AMD;
    k->device_id = PCI_DEVICE_ID_AMD_SCSI;
    k->revision = 0x10;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "AMD Am53c974 PCscsi-PCI SCSI adapter";
    dc->reset = esp_pci_hard_reset;
    dc->vmsd = &vmstate_esp_pci_scsi;
}

static const TypeInfo esp_pci_info = {
    .name = TYPE_AM53C974_DEVICE,
    .parent = TYPE_PCI_DEVICE,
    .instance_init = esp_pci_init,
    .instance_size = sizeof(PCIESPState),
    .class_init = esp_pci_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

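/*
 * The Tekram DC-390 is an Am53c974 board with a 64-word (93C46-style) serial
 * EEPROM holding the adapter configuration. It is modelled as a subclass of
 * the am53c974 device, with the EEPROM hooked into PCI config space accesses.
 */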
struct DC390State {
    PCIESPState pci;
    eeprom_t *eeprom;
};
typedef struct DC390State DC390State;

#define TYPE_DC390_DEVICE "dc390"
DECLARE_INSTANCE_CHECKER(DC390State, DC390,
                         TYPE_DC390_DEVICE)

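/* Byte offsets of the configuration fields within the 128-byte EEPROM image */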
#define EE_ADAPT_SCSI_ID 64
#define EE_MODE2         65
#define EE_DELAY         66
#define EE_TAG_CMD_NUM   67
#define EE_ADAPT_OPTIONS 68
#define EE_BOOT_SCSI_ID  69
#define EE_BOOT_SCSI_LUN 70
#define EE_CHKSUM1       126
#define EE_CHKSUM2       127

#define EE_ADAPT_OPTION_F6_F8_AT_BOOT   0x01
#define EE_ADAPT_OPTION_BOOT_FROM_CDROM 0x02
#define EE_ADAPT_OPTION_INT13           0x04
#define EE_ADAPT_OPTION_SCAM_SUPPORT    0x08


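/*
 * PCI config space hooks used to bit-bang the EEPROM: the guest driver reads
 * the EEPROM DO line through the low byte of the vendor ID, and drives the
 * clock/data/chip-select lines by writing to config offsets 0x80 and 0xc0.
 */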
static uint32_t dc390_read_config(PCIDevice *dev, uint32_t addr, int l)
{
    DC390State *pci = DC390(dev);
    uint32_t val;

    val = pci_default_read_config(dev, addr, l);

    if (addr == 0x00 && l == 1) {
        /* First config space byte (vendor ID) is AND-ed with EEPROM DO line */
        if (!eeprom93xx_read(pci->eeprom)) {
            val &= ~0xff;
        }
    }

    return val;
}

static void dc390_write_config(PCIDevice *dev,
                               uint32_t addr, uint32_t val, int l)
{
    DC390State *pci = DC390(dev);
    if (addr == 0x80) {
        /* EEPROM write */
        int eesk = val & 0x80 ? 1 : 0;
        int eedi = val & 0x40 ? 1 : 0;
        eeprom93xx_write(pci->eeprom, 1, eesk, eedi);
    } else if (addr == 0xc0) {
        /* EEPROM CS low */
        eeprom93xx_write(pci->eeprom, 0, 0, 0);
    } else {
        pci_default_write_config(dev, addr, val, l);
    }
}

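/*
 * Realize the DC-390: realize the underlying am53c974, then create the
 * EEPROM and fill it with defaults (adapter SCSI ID 7, tagged-command count,
 * boot options) plus a checksum chosen so that the 16-bit words of the image
 * sum to 0x1234.
 */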
static void dc390_scsi_realize(PCIDevice *dev, Error **errp)
{
    DC390State *pci = DC390(dev);
    Error *err = NULL;
    uint8_t *contents;
    uint16_t chksum = 0;
    int i;

    /* init base class */
    esp_pci_scsi_realize(dev, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    /* EEPROM */
    pci->eeprom = eeprom93xx_new(DEVICE(dev), 64);

    /* set default eeprom values */
    contents = (uint8_t *)eeprom93xx_data(pci->eeprom);

    for (i = 0; i < 16; i++) {
        contents[i * 2] = 0x57;
        contents[i * 2 + 1] = 0x00;
    }
    contents[EE_ADAPT_SCSI_ID] = 7;
    contents[EE_MODE2] = 0x0f;
    contents[EE_TAG_CMD_NUM] = 0x04;
    contents[EE_ADAPT_OPTIONS] = EE_ADAPT_OPTION_F6_F8_AT_BOOT
                               | EE_ADAPT_OPTION_BOOT_FROM_CDROM
                               | EE_ADAPT_OPTION_INT13;

    /* update eeprom checksum */
    for (i = 0; i < EE_CHKSUM1; i += 2) {
        chksum += contents[i] + (((uint16_t)contents[i + 1]) << 8);
    }
    chksum = 0x1234 - chksum;
    contents[EE_CHKSUM1] = chksum & 0xff;
    contents[EE_CHKSUM2] = chksum >> 8;
}

static void dc390_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = dc390_scsi_realize;
    k->config_read = dc390_read_config;
    k->config_write = dc390_write_config;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Tekram DC-390 SCSI adapter";
}

static const TypeInfo dc390_info = {
    .name = TYPE_DC390_DEVICE,
    .parent = TYPE_AM53C974_DEVICE,
    .instance_size = sizeof(DC390State),
    .class_init = dc390_class_init,
};

static void esp_pci_register_types(void)
{
    type_register_static(&esp_pci_info);
    type_register_static(&dc390_info);
}

type_init(esp_pci_register_types)