Lines matching "bus" and "addr" in QEMU's PCI IDE (bus-master DMA) emulation (excerpt).

/*
 * QEMU IDE Emulation: PCI Bus support.
 */

#include "qemu/error-report.h"
#include "ide-internal.h"
static uint64_t pci_ide_status_read(void *opaque, hwaddr addr, unsigned size)
{
    IDEBus *bus = opaque;

    /* Only a 1-byte read of offset 2 (the alternate status register) is valid. */
    if (addr != 2 || size != 1) {
        return ((uint64_t)1 << (size * 8)) - 1;
    }
    return ide_status_read(bus, addr + 2);
}
static void pci_ide_ctrl_write(void *opaque, hwaddr addr,
                               uint64_t data, unsigned size)
{
    IDEBus *bus = opaque;

    /* Only a 1-byte write of offset 2 (the device control register) is valid. */
    if (addr != 2 || size != 1) {
        return;
    }
    ide_ctrl_write(bus, addr + 2, data);
}
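/*
 * Hedged sketch: the status/ctrl shims above follow the MemoryRegionOps
 * read/write signatures, so a controller can expose them as the 4-byte
 * control-block BAR used in native PCI mode. The name below is illustrative,
 * not the table actually defined in this file.
 */
static const MemoryRegionOps pci_ide_ctrl_ops_sketch = {
    .read = pci_ide_status_read,
    .write = pci_ide_ctrl_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};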
static uint64_t pci_ide_data_read(void *opaque, hwaddr addr, unsigned size)
{
    IDEBus *bus = opaque;

    if (size == 1) {
        return ide_ioport_read(bus, addr);
    } else if (addr == 0) {
        if (size == 2) {
            return ide_data_readw(bus, addr);
        } else {
            return ide_data_readl(bus, addr);
        }
    }
    return ((uint64_t)1 << (size * 8)) - 1;
}
static void pci_ide_data_write(void *opaque, hwaddr addr,
                               uint64_t data, unsigned size)
{
    IDEBus *bus = opaque;

    if (size == 1) {
        ide_ioport_write(bus, addr, data);
    } else if (addr == 0) {
        if (size == 2) {
            ide_data_writew(bus, addr, data);
        } else {
            ide_data_writel(bus, addr, data);
        }
    }
}
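/*
 * Likewise for the 8-byte command-block BAR: a hedged sketch of how the data
 * read/write shims could be wrapped and registered from a controller's
 * realize hook. The names below are illustrative assumptions, not the exact
 * helpers used by the in-tree controllers.
 */
static const MemoryRegionOps pci_ide_data_ops_sketch = {
    .read = pci_ide_data_read,
    .write = pci_ide_data_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void pci_ide_example_init_bar(PCIIDEState *d)
{
    PCIDevice *dev = PCI_DEVICE(d);

    /* Illustrative: assumes a MemoryRegion data_bar[2] field on PCIIDEState. */
    memory_region_init_io(&d->data_bar[0], OBJECT(d), &pci_ide_data_ops_sketch,
                          &d->bus[0], "pci-ide-data", 8);
    pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->data_bar[0]);
}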
/* pci_ide_update_mode(): reconfigure the controller's I/O resources when the
 * programming-interface byte selects legacy or native PCI mode
 * (s is the PCIIDEState, d its PCIDevice). */
    uint8_t mode = d->config[PCI_CLASS_PROG];

    /* Both channels in legacy (compatibility) mode: no PCI interrupt pin is
     * advertised and the fixed legacy I/O ports are registered. */
        pci_config_set_interrupt_pin(d->config, 0);

        if (!s->bus[0].portio_list.owner) {
            portio_list_init(&s->bus[0].portio_list, OBJECT(d),
                             ide_portio_list, &s->bus[0], "ide");
            portio_list_add(&s->bus[0].portio_list,
                            pci_address_space_io(d), 0x1f0);
        }

        if (!s->bus[0].portio2_list.owner) {
            portio_list_init(&s->bus[0].portio2_list, OBJECT(d),
                             ide_portio2_list, &s->bus[0], "ide");
            portio_list_add(&s->bus[0].portio2_list,
                            pci_address_space_io(d), 0x3f6);
        }

        if (!s->bus[1].portio_list.owner) {
            portio_list_init(&s->bus[1].portio_list, OBJECT(d),
                             ide_portio_list, &s->bus[1], "ide");
            portio_list_add(&s->bus[1].portio_list,
                            pci_address_space_io(d), 0x170);
        }

        if (!s->bus[1].portio2_list.owner) {
            portio_list_init(&s->bus[1].portio2_list, OBJECT(d),
                             ide_portio2_list, &s->bus[1], "ide");
            portio_list_add(&s->bus[1].portio2_list,
                            pci_address_space_io(d), 0x376);
        }

    /* Both channels in native PCI mode: interrupts are delivered through the
     * PCI interrupt pin and the legacy ports are removed again. */
        pci_config_set_interrupt_pin(d->config, 1);

        if (s->bus[0].portio_list.owner) {
            portio_list_del(&s->bus[0].portio_list);
            portio_list_destroy(&s->bus[0].portio_list);
        }

        if (s->bus[0].portio2_list.owner) {
            portio_list_del(&s->bus[0].portio2_list);
            portio_list_destroy(&s->bus[0].portio2_list);
        }

        if (s->bus[1].portio_list.owner) {
            portio_list_del(&s->bus[1].portio_list);
            portio_list_destroy(&s->bus[1].portio_list);
        }

        if (s->bus[1].portio2_list.owner) {
            portio_list_del(&s->bus[1].portio2_list);
            portio_list_destroy(&s->bus[1].portio2_list);
        }
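/*
 * Hedged sketch: a controller that lets the guest switch between legacy and
 * native mode would typically re-run pci_ide_update_mode() from its PCI
 * config-space write hook whenever the programming-interface byte changes.
 * The hook below is illustrative, not code from this file.
 */
static void pci_ide_example_cfg_write(PCIDevice *dev, uint32_t addr,
                                      uint32_t val, int len)
{
    pci_default_write_config(dev, addr, val, len);
    if (range_covers_byte(addr, len, PCI_CLASS_PROG)) {
        pci_ide_update_mode(PCI_IDE(dev));
    }
}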
/* bmdma_active_if(): return the IDEState of the unit whose request is being
 * carried out (or retried) on this engine's bus. */
    assert(bmdma->bus->retry_unit != (uint8_t)-1);
    return bmdma->bus->ifs + bmdma->bus->retry_unit;
/* bmdma_start_dma(): remember the core's completion callback (dma_cb), reset
 * the per-PRD cursor and, if the guest has already set the start bit, kick
 * the transfer immediately. */
    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
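/*
 * For orientation: the IDE core reaches bmdma_start_dma() through the
 * IDEDMAOps hook installed by bmdma_init() below, roughly along these lines
 * (paraphrased core-side call site, shown as an assumption):
 *
 *     s->bus->dma->ops->start_dma(s->bus->dma, s, ide_dma_cb);
 */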
/* bmdma_prepare_buf(): build a scatter/gather list from the guest's PRD table
 * (bm is the engine, s the active drive's IDEState, limit the byte budget).
 * Returns the number of bytes prepared, -1 on error. */
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    pci_dma_sglist_init(&s->sg, pci_dev,
                        s->nsector / (BMDMA_PAGE_SIZE / BDRV_SECTOR_SIZE) + 1);
    s->io_buffer_size = 0;
    for (;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail-safe of one page of PRDs) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                break;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;          /* a zero byte count means 64 KiB */
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            uint64_t sg_len = MIN(limit - s->sg.size, bm->cur_prd_len);
            if (sg_len) {
                qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }
    return s->sg.size;
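/*
 * For reference, the 8-byte PRD entry decoded by hand above follows the
 * standard bus-master IDE (SFF-8038i) layout; spelled out as a sketch:
 */
struct bmdma_prd_sketch {
    uint32_t addr;      /* physical base address of the buffer (little-endian) */
    uint32_t size;      /* bits 0-15: byte count (bit 0 ignored, 0 means 64 KiB);
                           bit 31: last entry in the table (end of transfer) */
};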
/* bmdma_rw_buf(): copy between s->io_buffer and guest memory, walking the PRD
 * table as needed; is_write means io_buffer -> guest RAM (a guest disk read).
 * Returns 0 if the PRD table runs out, 1 on success. */
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for (;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0)
            break;
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail-safe of one page of PRDs) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                return 0;
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len)
            l = bm->cur_prd_len;
        if (is_write) {
            pci_dma_write(pci_dev, bm->cur_prd_addr,
                          s->io_buffer + s->io_buffer_index, l);
        } else {
            pci_dma_read(pci_dev, bm->cur_prd_addr,
                         s->io_buffer + s->io_buffer_index, l);
        }
        bm->cur_prd_addr += l;
        bm->cur_prd_len -= l;
        s->io_buffer_index += l;
    }
    return 1;
/* bmdma_set_inactive(): forget the DMA callback; keep BM_STATUS_DMAING set
 * only if the core still has more data to transfer. */
    bm->dma_cb = NULL;
    if (more) {
        bm->status |= BM_STATUS_DMAING;
    } else {
        bm->status &= ~BM_STATUS_DMAING;
    }
/* bmdma_restart_dma(): rewind the PRD cursor to the start of the table. */
    bm->cur_addr = bm->addr;
/* bmdma_cancel(): if a transfer is in flight, stop it without marking the
 * engine as needing a restart. */
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma, false);
    }
/* bmdma_reset(): clear all bus-master register and PRD-walk state. */
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
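/*
 * The callbacks above are collected into the IDEDMAOps table that
 * bmdma_init() installs on the bus (see below). A hedged sketch of that
 * wiring, using the hook names from QEMU's IDE core; the exact table in this
 * file may list more or fewer hooks:
 */
static const struct IDEDMAOps bmdma_ops_sketch = {
    .start_dma    = bmdma_start_dma,
    .prepare_buf  = bmdma_prepare_buf,
    .rw_buf       = bmdma_rw_buf,
    .restart_dma  = bmdma_restart_dma,
    .set_inactive = bmdma_set_inactive,
    .reset        = bmdma_reset,
};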
/* bmdma_irq(): IRQ interposer installed by bmdma_init(). */
    if (!level) {                   /* falling edge: just pass it through */
        qemu_set_irq(bm->irq, level);
        return;
    }
    bm->status |= BM_STATUS_INT;    /* latch the interrupt status bit... */
    qemu_set_irq(bm->irq, level);   /* ...then raise the real IRQ line */
/* bmdma_cmd_writeb(): guest write to the bus-master command register. */
    /* Ignore writes to SSBM if they keep the old value */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            /* start bit cleared: cancel any transfer in flight */
            ide_cancel_dma_sync(ide_bus_active_if(bm->bus));
            bm->status &= ~BM_STATUS_DMAING;
        } else {
            /* start bit set: rewind the PRD cursor and kick off a queued job */
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                if (bm->dma_cb)
                    bm->dma_cb(bmdma_active_if(bm), 0);
            }
        }
    }

    bm->cmd = val & 0x09;   /* keep only the start and read/write bits */
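/*
 * For context, the guest-side sequence this register emulates (standard
 * bus-master IDE programming model; illustrative pseudo-driver, not QEMU
 * code):
 *
 *     outl(prd_table_phys, bmdma_base + 4);                   // PRD pointer
 *     outb(BM_STATUS_ERROR | BM_STATUS_INT, bmdma_base + 2);  // clear status
 *     outb(BM_CMD_READ | BM_CMD_START, bmdma_base + 0);       // arm the engine
 *     // ...then issue the DMA command through the task-file registers...
 */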
/* bmdma_status_writeb(): bits 5/6 are plain read-write, DMAING is read-only,
 * and writing 1 to ERROR or INT clears that bit (write-one-to-clear). */
    bm->status = (val & 0x60) | (bm->status & BM_STATUS_DMAING)
                 | (bm->status & ~val & (BM_STATUS_ERROR | BM_STATUS_INT));
static uint64_t bmdma_addr_read(void *opaque, hwaddr addr,
                                unsigned width)
{
    BMDMAState *bm = opaque;
    uint32_t mask = (1ULL << (width * 8)) - 1;

    /* Read back the selected byte lane(s) of the PRD table base register. */
    return (bm->addr >> (addr * 8)) & mask;
}

static void bmdma_addr_write(void *opaque, hwaddr addr,
                             uint64_t data, unsigned width)
{
    BMDMAState *bm = opaque;
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

    /* Merge the written byte lane(s) into the PRD table base register. */
    bm->addr &= ~(mask << shift);
    bm->addr |= ((data & mask) << shift) & ~3;
}
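/*
 * Worked example for the partial write above: a one-byte write of 0x12 to
 * offset 1 gives shift = 8 and mask = 0xff, so only bits 8..15 of bm->addr
 * are replaced. The final "& ~3" clears the two lowest bits of the merged
 * value, which (for writes covering byte 0) keeps the PRD table pointer
 * dword-aligned.
 */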
/* ide_bmdma_current_needed(): only migrate the PRD-walk subsection while a
 * PRD entry is partially consumed. */
    return (bm->cur_prd_len != 0);
/* ide_bmdma_status_needed(): true if any status bits are being reused to
 * carry error_status for older-version migration (see pre_save below). */
    return ((bm->status & abused_bits) != 0);
/* ide_bmdma_pre_save(): stash retry/error state into the legacy migration
 * fields so older destinations can still restore it (abused_bits selects the
 * status bits reused for error_status). */
    if (!(bm->status & BM_STATUS_DMAING) && bm->dma_cb) {
        bm->bus->error_status =
            ide_dma_cmd_to_retry(bmdma_active_if(bm)->dma_cmd);
    }
    bm->migration_retry_unit = bm->bus->retry_unit;
    bm->migration_retry_sector_num = bm->bus->retry_sector_num;
    bm->migration_retry_nsector = bm->bus->retry_nsector;
    bm->migration_compat_status =
        (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);
/* ide_bmdma_post_load(): undo the packing done in ide_bmdma_pre_save().
 * This function accesses bm->bus->error_status, which is loaded only after
 * BMDMA itself, so it is called from ide_pci_post_load() below rather than
 * being registered as this vmstate's own post_load hook. */
    if (bm->status == 0) {
        bm->status = bm->migration_compat_status & ~abused_bits;
        bm->bus->error_status |= bm->migration_compat_status & abused_bits;
    }
    if (bm->bus->error_status) {
        bm->bus->retry_sector_num = bm->migration_retry_sector_num;
        bm->bus->retry_nsector = bm->migration_retry_nsector;
        bm->bus->retry_unit = bm->migration_retry_unit;
    }
    /* VMState field entry for the BMDMA engine (excerpt): the PRD table
     * base register. */
    VMSTATE_UINT32(addr, BMDMAState),
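/*
 * Hedged sketch of how the .needed helpers above are typically attached to a
 * migration subsection; the field list is abridged and the name is
 * illustrative, not the full description defined in this file:
 */
static const VMStateDescription vmstate_bmdma_current_sketch = {
    .name = "ide_bmdma/current",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_current_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};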
/* ide_pci_post_load(): fix up both BMDMA engines once their fields are in. */
    for (i = 0; i < 2; i++) {
        /* current versions store only 0/1; older versions stored more bits */
        d->bmdma[i].migration_retry_unit &= 1;
        ide_bmdma_post_load(&d->bmdma[i], -1);
    }
    /* VMState field entries for the whole controller (excerpt): both
     * channels and the drives attached to them. */
    VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
    VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
    VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
/* pci_ide_create_devs(): drive i is attached to channel bus[i] as unit[i],
 * i.e. two units per channel, in order. */
    static const int bus[4] = { 0, 0, 1, 1 };

    ide_bus_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
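/*
 * Typical board-level usage (hedged sketch; the device name and bus variable
 * come from the caller's context, not this file): create the controller,
 * then let pci_ide_create_devs() attach the -hda/-hdb style drives to its
 * two channels.
 */
static void example_board_add_ide(PCIBus *pci_bus)
{
    PCIDevice *dev = pci_create_simple(pci_bus, -1, "piix3-ide");

    pci_ide_create_devs(dev);
}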
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
{
    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    /* Interpose on the channel IRQ: bmdma_irq() latches BM_STATUS_INT and
     * then forwards the level to the original line saved in bm->irq. */
    bm->irq = bus->irq;
    bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
    bm->bus = bus;
    bm->pci_dev = d;
}
/* pci_ide_init(): instance init; expose the per-channel ISA IRQ lines as
 * named GPIO outputs so boards can wire them up. */
    qdev_init_gpio_out_named(DEVICE(d), d->isa_irq, "isa-irq",
                             ARRAY_SIZE(d->isa_irq));