/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/pci/pci_device.h"
#include "hw/irq.h"
#include "hw/nvram/eeprom93xx.h"
#include "hw/scsi/esp.h"
#include "migration/vmstate.h"
#include "trace.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qom/object.h"

#define TYPE_AM53C974_DEVICE "am53c974"

typedef struct PCIESPState PCIESPState;
DECLARE_INSTANCE_CHECKER(PCIESPState, PCI_ESP,
                         TYPE_AM53C974_DEVICE)

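/*
 * AM53C974 DMA engine registers, exposed as 32-bit registers at offsets
 * 0x40-0x5f of the I/O BAR (see esp_pci_io_read/esp_pci_io_write below);
 * the values here are word indices into dma_regs[].
 */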
#define DMA_CMD   0x0
#define DMA_STC   0x1
#define DMA_SPA   0x2
#define DMA_WBC   0x3
#define DMA_WAC   0x4
#define DMA_STAT  0x5
#define DMA_SMDLA 0x6
#define DMA_WMAC  0x7

#define DMA_CMD_MASK   0x03
#define DMA_CMD_DIAG   0x04
#define DMA_CMD_MDL    0x10
#define DMA_CMD_INTE_P 0x20
#define DMA_CMD_INTE_D 0x40
#define DMA_CMD_DIR    0x80

#define DMA_STAT_PWDN    0x01
#define DMA_STAT_ERROR   0x02
#define DMA_STAT_ABORT   0x04
#define DMA_STAT_DONE    0x08
#define DMA_STAT_SCSIINT 0x10
#define DMA_STAT_BCMBLT  0x20

#define SBAC_STATUS (1 << 24)

struct PCIESPState {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    MemoryRegion io;
    uint32_t dma_regs[8];
    uint32_t sbac;
    ESPState esp;
};

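/*
 * Recompute the PCI INTA line: it is asserted when the SCSI core has an
 * interrupt pending (DMA_STAT_SCSIINT), or when DMA completion interrupts
 * are enabled (DMA_CMD_INTE_D) and the DMA engine reports DMA_STAT_DONE.
 */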
static void esp_pci_update_irq(PCIESPState *pci)
{
    int scsi_level = !!(pci->dma_regs[DMA_STAT] & DMA_STAT_SCSIINT);
    int dma_level = (pci->dma_regs[DMA_CMD] & DMA_CMD_INTE_D) ?
                    !!(pci->dma_regs[DMA_STAT] & DMA_STAT_DONE) : 0;
    int level = scsi_level || dma_level;

    pci_set_irq(PCI_DEVICE(pci), level);
}

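/* Handler for the ESP core's IRQ line: mirror it into DMA_STAT_SCSIINT */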
static void esp_irq_handler(void *opaque, int irq_num, int level)
{
    PCIESPState *pci = PCI_ESP(opaque);

    if (level) {
        pci->dma_regs[DMA_STAT] |= DMA_STAT_SCSIINT;

        /*
         * If raising the ESP IRQ to indicate end of DMA transfer, set
         * DMA_STAT_DONE at the same time. In theory this should be done in
         * esp_pci_dma_memory_rw(), however there is a delay between setting
         * DMA_STAT_DONE and the ESP IRQ arriving which is visible to the
         * guest and can cause confusion, e.g. for Linux.
         */
        if ((pci->dma_regs[DMA_CMD] & DMA_CMD_MASK) == 0x3 &&
            pci->dma_regs[DMA_WBC] == 0) {
            pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
        }
    } else {
        pci->dma_regs[DMA_STAT] &= ~DMA_STAT_SCSIINT;
    }

    esp_pci_update_irq(pci);
}

static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val)
{
    ESPState *s = &pci->esp;

    trace_esp_pci_dma_idle(val);
    esp_dma_enable(s, 0, 0);
}

static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val)
{
    trace_esp_pci_dma_blast(val);
    qemu_log_mask(LOG_UNIMP, "am53c974: cmd BLAST not implemented\n");
    pci->dma_regs[DMA_STAT] |= DMA_STAT_BCMBLT;
}

static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val)
{
    ESPState *s = &pci->esp;

    trace_esp_pci_dma_abort(val);
    if (s->current_req) {
        scsi_req_cancel(s->current_req);
    }
}

static void esp_pci_handle_start(PCIESPState *pci, uint32_t val)
{
    ESPState *s = &pci->esp;

    trace_esp_pci_dma_start(val);

    pci->dma_regs[DMA_WBC] = pci->dma_regs[DMA_STC];
    pci->dma_regs[DMA_WAC] = pci->dma_regs[DMA_SPA];
    pci->dma_regs[DMA_WMAC] = pci->dma_regs[DMA_SMDLA];

    pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT
                               | DMA_STAT_DONE | DMA_STAT_ABORT
                               | DMA_STAT_ERROR | DMA_STAT_PWDN);

    esp_dma_enable(s, 0, 1);
}

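/*
 * DMA register block accessors. Note that the DMA_STAT error/abort/done
 * bits are cleared by writing a 1 to them when SBAC_STATUS is set, and
 * cleared as a side effect of reading DMA_STAT when it is not.
 */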
static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
{
    trace_esp_pci_dma_write(saddr, pci->dma_regs[saddr], val);
    switch (saddr) {
    case DMA_CMD:
        pci->dma_regs[saddr] = val;
        switch (val & DMA_CMD_MASK) {
        case 0x0: /* IDLE */
            esp_pci_handle_idle(pci, val);
            break;
        case 0x1: /* BLAST */
            esp_pci_handle_blast(pci, val);
            break;
        case 0x2: /* ABORT */
            esp_pci_handle_abort(pci, val);
            break;
        case 0x3: /* START */
            esp_pci_handle_start(pci, val);
            break;
        default: /* can't happen */
            abort();
        }
        break;
    case DMA_STC:
    case DMA_SPA:
    case DMA_SMDLA:
        pci->dma_regs[saddr] = val;
        break;
    case DMA_STAT:
        if (pci->sbac & SBAC_STATUS) {
            /* clear some bits on write */
            uint32_t mask = DMA_STAT_ERROR | DMA_STAT_ABORT | DMA_STAT_DONE;
            pci->dma_regs[DMA_STAT] &= ~(val & mask);
            esp_pci_update_irq(pci);
        }
        break;
    default:
        trace_esp_pci_error_invalid_write_dma(val, saddr);
        return;
    }
}

static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr)
{
    uint32_t val;

    val = pci->dma_regs[saddr];
    if (saddr == DMA_STAT) {
        if (!(pci->sbac & SBAC_STATUS)) {
            pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_ERROR | DMA_STAT_ABORT |
                                         DMA_STAT_DONE);
            esp_pci_update_irq(pci);
        }
    }

    trace_esp_pci_dma_read(saddr, val);
    return val;
}

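/*
 * I/O BAR layout: 0x00-0x3f are the SCSI core registers (one per 32-bit
 * slot), 0x40-0x5f the DMA engine registers and 0x70 the DMA SCSI Bus and
 * Control (SBAC) register. Only aligned 32-bit accesses are handled
 * natively; narrower or unaligned writes are widened into a
 * read-modify-write of the containing 32-bit register.
 */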
static void esp_pci_io_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned int size)
{
    PCIESPState *pci = opaque;
    ESPState *s = &pci->esp;

    if (size < 4 || addr & 3) {
        /* need to upgrade request: we only support 4-byte accesses */
        uint32_t current = 0, mask;
        int shift;

        if (addr < 0x40) {
            current = s->wregs[addr >> 2];
        } else if (addr < 0x60) {
            current = pci->dma_regs[(addr - 0x40) >> 2];
        } else if (addr < 0x74) {
            current = pci->sbac;
        }

        shift = (4 - size) * 8;
        mask = (~(uint32_t)0 << shift) >> shift;

        shift = ((4 - (addr & 3)) & 3) * 8;
        val <<= shift;
        val |= current & ~(mask << shift);
        addr &= ~3;
        size = 4;
    }
    g_assert(size >= 4);

    if (addr < 0x40) {
        /* SCSI core reg */
        esp_reg_write(s, addr >> 2, val);
    } else if (addr < 0x60) {
        /* PCI DMA CCB */
        esp_pci_dma_write(pci, (addr - 0x40) >> 2, val);
    } else if (addr == 0x70) {
        /* DMA SCSI Bus and control */
        trace_esp_pci_sbac_write(pci->sbac, val);
        pci->sbac = val;
    } else {
        trace_esp_pci_error_invalid_write((int)addr);
    }
}

static uint64_t esp_pci_io_read(void *opaque, hwaddr addr,
                                unsigned int size)
{
    PCIESPState *pci = opaque;
    ESPState *s = &pci->esp;
    uint32_t ret;

    if (addr < 0x40) {
        /* SCSI core reg */
        ret = esp_reg_read(s, addr >> 2);
    } else if (addr < 0x60) {
        /* PCI DMA CCB */
        ret = esp_pci_dma_read(pci, (addr - 0x40) >> 2);
    } else if (addr == 0x70) {
        /* DMA SCSI Bus and control */
        trace_esp_pci_sbac_read(pci->sbac);
        ret = pci->sbac;
    } else {
        /* Invalid region */
        trace_esp_pci_error_invalid_read((int)addr);
        ret = 0;
    }

    /* give only requested data */
    ret >>= (addr & 3) * 8;
    ret &= ~(~(uint64_t)0 << (8 * size));

    return ret;
}

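/*
 * Common DMA helper: checks that the requested direction matches the one
 * programmed in DMA_CMD_DIR, clamps the transfer to the remaining working
 * byte count and advances the working address/count registers afterwards.
 */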
static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len,
                                  DMADirection dir)
{
    dma_addr_t addr;
    DMADirection expected_dir;

    if (pci->dma_regs[DMA_CMD] & DMA_CMD_DIR) {
        expected_dir = DMA_DIRECTION_FROM_DEVICE;
    } else {
        expected_dir = DMA_DIRECTION_TO_DEVICE;
    }

    if (dir != expected_dir) {
        trace_esp_pci_error_invalid_dma_direction();
        return;
    }

    if (pci->dma_regs[DMA_CMD] & DMA_CMD_MDL) {
        qemu_log_mask(LOG_UNIMP, "am53c974: MDL transfer not implemented\n");
    }

    addr = pci->dma_regs[DMA_WAC];
    if (pci->dma_regs[DMA_WBC] < len) {
        len = pci->dma_regs[DMA_WBC];
    }

    pci_dma_rw(PCI_DEVICE(pci), addr, buf, len, dir, MEMTXATTRS_UNSPECIFIED);

    /* update status registers */
    pci->dma_regs[DMA_WBC] -= len;
    pci->dma_regs[DMA_WAC] += len;
}

static void esp_pci_dma_memory_read(void *opaque, uint8_t *buf, int len)
{
    PCIESPState *pci = opaque;
    esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static void esp_pci_dma_memory_write(void *opaque, uint8_t *buf, int len)
{
    PCIESPState *pci = opaque;
    esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_FROM_DEVICE);
}

static const MemoryRegionOps esp_pci_io_ops = {
    .read = esp_pci_io_read,
    .write = esp_pci_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static void esp_pci_hard_reset(DeviceState *dev)
{
    PCIESPState *pci = PCI_ESP(dev);
    ESPState *s = &pci->esp;

    esp_hard_reset(s);
    pci->dma_regs[DMA_CMD] &= ~(DMA_CMD_DIR | DMA_CMD_INTE_D | DMA_CMD_INTE_P
                              | DMA_CMD_MDL | DMA_CMD_DIAG | DMA_CMD_MASK);
    pci->dma_regs[DMA_WBC] &= ~0xffff;
    pci->dma_regs[DMA_WAC] = 0xffffffff;
    pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT
                               | DMA_STAT_DONE | DMA_STAT_ABORT
                               | DMA_STAT_ERROR);
    pci->dma_regs[DMA_WMAC] = 0xfffffffd;
}

static const VMStateDescription vmstate_esp_pci_scsi = {
    .name = "pciespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PCIESPState),
        VMSTATE_BUFFER_UNSAFE(dma_regs, PCIESPState, 0, 8 * sizeof(uint32_t)),
        VMSTATE_UINT8_V(esp.mig_version_id, PCIESPState, 2),
        VMSTATE_STRUCT(esp, PCIESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static const struct SCSIBusInfo esp_pci_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled,
};

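/*
 * Realize the embedded ESP core, point its DMA callbacks at the PCI DMA
 * engine above, map the 0x80-byte register window as an I/O BAR and hook
 * the core's IRQ line up to PCI INTA.
 */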
static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp)
{
    PCIESPState *pci = PCI_ESP(dev);
    DeviceState *d = DEVICE(dev);
    ESPState *s = &pci->esp;
    uint8_t *pci_conf;

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    pci_conf = dev->config;

    /* Interrupt pin A */
    pci_conf[PCI_INTERRUPT_PIN] = 0x01;

    s->dma_memory_read = esp_pci_dma_memory_read;
    s->dma_memory_write = esp_pci_dma_memory_write;
    s->dma_opaque = pci;
    s->chip_id = TCHI_AM53C974;
    memory_region_init_io(&pci->io, OBJECT(pci), &esp_pci_io_ops, pci,
                          "esp-io", 0x80);

    pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io);
    s->irq = qemu_allocate_irq(esp_irq_handler, pci, 0);

    scsi_bus_init(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info);
}

static void esp_pci_scsi_exit(PCIDevice *d)
{
    PCIESPState *pci = PCI_ESP(d);
    ESPState *s = &pci->esp;

    qemu_free_irq(s->irq);
}

static void esp_pci_init(Object *obj)
{
    PCIESPState *pci = PCI_ESP(obj);

    object_initialize_child(obj, "esp", &pci->esp, TYPE_ESP);
}

static void esp_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = esp_pci_scsi_realize;
    k->exit = esp_pci_scsi_exit;
    k->vendor_id = PCI_VENDOR_ID_AMD;
    k->device_id = PCI_DEVICE_ID_AMD_SCSI;
    k->revision = 0x10;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "AMD Am53c974 PCscsi-PCI SCSI adapter";
    device_class_set_legacy_reset(dc, esp_pci_hard_reset);
    dc->vmsd = &vmstate_esp_pci_scsi;
}

static const TypeInfo esp_pci_info = {
    .name = TYPE_AM53C974_DEVICE,
    .parent = TYPE_PCI_DEVICE,
    .instance_init = esp_pci_init,
    .instance_size = sizeof(PCIESPState),
    .class_init = esp_pci_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

struct DC390State {
    PCIESPState pci;
    eeprom_t *eeprom;
};
typedef struct DC390State DC390State;

#define TYPE_DC390_DEVICE "dc390"
DECLARE_INSTANCE_CHECKER(DC390State, DC390,
                         TYPE_DC390_DEVICE)

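/*
 * Byte offsets into the DC-390's 64-word (128-byte) serial EEPROM image.
 * The checksum word stored at EE_CHKSUM1/EE_CHKSUM2 is chosen so that all
 * 64 little-endian words sum to 0x1234; see the fixup in
 * dc390_scsi_realize().
 */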
#define EE_ADAPT_SCSI_ID 64
#define EE_MODE2         65
#define EE_DELAY         66
#define EE_TAG_CMD_NUM   67
#define EE_ADAPT_OPTIONS 68
#define EE_BOOT_SCSI_ID  69
#define EE_BOOT_SCSI_LUN 70
#define EE_CHKSUM1       126
#define EE_CHKSUM2       127

#define EE_ADAPT_OPTION_F6_F8_AT_BOOT   0x01
#define EE_ADAPT_OPTION_BOOT_FROM_CDROM 0x02
#define EE_ADAPT_OPTION_INT13           0x04
#define EE_ADAPT_OPTION_SCAM_SUPPORT    0x08


static uint32_t dc390_read_config(PCIDevice *dev, uint32_t addr, int l)
{
    DC390State *pci = DC390(dev);
    uint32_t val;

    val = pci_default_read_config(dev, addr, l);

    if (addr == 0x00 && l == 1) {
        /* First byte of address space is AND-ed with EEPROM DO line */
        if (!eeprom93xx_read(pci->eeprom)) {
            val &= ~0xff;
        }
    }

    return val;
}

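/*
 * The DC-390 bit-bangs its EEPROM through PCI config space: a write to
 * offset 0x80 drives the clock (bit 7) and data-in (bit 6) lines with chip
 * select asserted, while a write to offset 0xc0 deasserts chip select.
 */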
static void dc390_write_config(PCIDevice *dev,
                               uint32_t addr, uint32_t val, int l)
{
    DC390State *pci = DC390(dev);
    if (addr == 0x80) {
        /* EEPROM write */
        int eesk = val & 0x80 ? 1 : 0;
        int eedi = val & 0x40 ? 1 : 0;
        eeprom93xx_write(pci->eeprom, 1, eesk, eedi);
    } else if (addr == 0xc0) {
        /* EEPROM CS low */
        eeprom93xx_write(pci->eeprom, 0, 0, 0);
    } else {
        pci_default_write_config(dev, addr, val, l);
    }
}

static void dc390_scsi_realize(PCIDevice *dev, Error **errp)
{
    DC390State *pci = DC390(dev);
    Error *err = NULL;
    uint8_t *contents;
    uint16_t chksum = 0;
    int i;

    /* init base class */
    esp_pci_scsi_realize(dev, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    /* EEPROM */
    pci->eeprom = eeprom93xx_new(DEVICE(dev), 64);

    /* set default eeprom values */
    contents = (uint8_t *)eeprom93xx_data(pci->eeprom);

    for (i = 0; i < 16; i++) {
        contents[i * 2] = 0x57;
        contents[i * 2 + 1] = 0x00;
    }
    contents[EE_ADAPT_SCSI_ID] = 7;
    contents[EE_MODE2] = 0x0f;
    contents[EE_TAG_CMD_NUM] = 0x04;
    contents[EE_ADAPT_OPTIONS] = EE_ADAPT_OPTION_F6_F8_AT_BOOT
                                 | EE_ADAPT_OPTION_BOOT_FROM_CDROM
                                 | EE_ADAPT_OPTION_INT13;

    /* update eeprom checksum */
    for (i = 0; i < EE_CHKSUM1; i += 2) {
        chksum += contents[i] + (((uint16_t)contents[i + 1]) << 8);
    }
    chksum = 0x1234 - chksum;
    contents[EE_CHKSUM1] = chksum & 0xff;
    contents[EE_CHKSUM2] = chksum >> 8;
}

static void dc390_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = dc390_scsi_realize;
    k->config_read = dc390_read_config;
    k->config_write = dc390_write_config;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Tekram DC-390 SCSI adapter";
}

static const TypeInfo dc390_info = {
    .name = TYPE_DC390_DEVICE,
    .parent = TYPE_AM53C974_DEVICE,
    .instance_size = sizeof(DC390State),
    .class_init = dc390_class_init,
};

static void esp_pci_register_types(void)
{
    type_register_static(&esp_pci_info);
    type_register_static(&dc390_info);
}

type_init(esp_pci_register_types)